1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/authenc.h>
6 #include <crypto/cryptd.h>
7 #include <crypto/des.h>
8 #include <crypto/internal/aead.h>
9 #include <crypto/sha1.h>
10 #include <crypto/sha2.h>
11 #include <crypto/xts.h>
12 #include <crypto/gcm.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/sort.h>
15 #include <linux/module.h>
16 #include "otx2_cptvf.h"
17 #include "otx2_cptvf_algs.h"
18 #include "otx2_cpt_reqmgr.h"
19 #include "cn10k_cpt.h"
20 
21 /* Size of salt in AES GCM mode */
22 #define AES_GCM_SALT_SIZE 4
23 /* Size of IV in AES GCM mode */
24 #define AES_GCM_IV_SIZE 8
25 /* Size of ICV (Integrity Check Value) in AES GCM mode */
26 #define AES_GCM_ICV_SIZE 16
27 /* Offset of IV in AES GCM mode */
28 #define AES_GCM_IV_OFFSET 8
29 #define CONTROL_WORD_LEN 8
30 #define KEY2_OFFSET 48
31 #define DMA_MODE_FLAG(dma_mode) \
32 	(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
33 
34 /* Truncated SHA digest size */
35 #define SHA1_TRUNC_DIGEST_SIZE 12
36 #define SHA256_TRUNC_DIGEST_SIZE 16
37 #define SHA384_TRUNC_DIGEST_SIZE 24
38 #define SHA512_TRUNC_DIGEST_SIZE 32
39 
40 static DEFINE_MUTEX(mutex);
41 static int is_crypto_registered;
42 
43 struct cpt_device_desc {
44 	struct pci_dev *dev;
45 	int num_queues;
46 };
47 
48 struct cpt_device_table {
49 	atomic_t count;
50 	struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
51 };
52 
53 static struct cpt_device_table se_devices = {
54 	.count = ATOMIC_INIT(0)
55 };
56 
57 static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
58 
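/*
 * Pick the SE CPT device to submit to and map the calling CPU to one of
 * its instruction queues (queues are shared when there are fewer queues
 * than online CPUs).
 */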
59 static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
60 {
61 	int count;
62 
63 	count = atomic_read(&se_devices.count);
64 	if (count < 1)
65 		return -ENODEV;
66 
67 	*cpu_num = get_cpu();
68 	/*
69 	 * On the OcteonTX2 platform each CPT instruction queue is bound to a
70 	 * local function (LF), and LFs can be attached to a PF or a VF,
71 	 * so we always use the first device. We get maximum performance
72 	 * when one CPT queue is available for each cpu; otherwise CPT
73 	 * queues need to be shared between cpus.
74 	 */
75 	if (*cpu_num >= se_devices.desc[0].num_queues)
76 		*cpu_num %= se_devices.desc[0].num_queues;
77 	*pdev = se_devices.desc[0].dev;
78 
79 	put_cpu();
80 
81 	return 0;
82 }
83 
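/*
 * NULL-cipher AEAD decrypt path: compare the HMAC calculated by the
 * hardware with the HMAC received in the request.
 */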
84 static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
85 {
86 	struct otx2_cpt_req_ctx *rctx;
87 	struct aead_request *req;
88 	struct crypto_aead *tfm;
89 
90 	req = container_of(cpt_req->areq, struct aead_request, base);
91 	tfm = crypto_aead_reqtfm(req);
92 	rctx = aead_request_ctx_dma(req);
93 	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
94 		   rctx->fctx.hmac.s.hmac_recv,
95 		   crypto_aead_authsize(tfm)) != 0)
96 		return -EBADMSG;
97 
98 	return 0;
99 }
100 
101 static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
102 {
103 	struct otx2_cpt_inst_info *inst_info = arg2;
104 	struct crypto_async_request *areq = arg1;
105 	struct otx2_cpt_req_info *cpt_req;
106 	struct pci_dev *pdev;
107 
108 	if (inst_info) {
109 		cpt_req = inst_info->req;
110 		if (!status) {
111 			/*
112 			 * When the selected cipher is NULL we need to manually
113 			 * verify that the calculated hmac value matches the
114 			 * received hmac value
115 			 */
116 			if (cpt_req->req_type ==
117 			    OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
118 			    !cpt_req->is_enc)
119 				status = validate_hmac_cipher_null(cpt_req);
120 		}
121 		pdev = inst_info->pdev;
122 		otx2_cpt_info_destroy(pdev, inst_info);
123 	}
124 	if (areq)
125 		crypto_request_complete(areq, status);
126 }
127 
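/*
 * For CBC modes the crypto API expects req->iv to hold the last
 * ciphertext block on completion.  Copy it back from the data (or from
 * the copy saved before an in-place decryption).
 */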
128 static void output_iv_copyback(struct crypto_async_request *areq)
129 {
130 	struct otx2_cpt_req_info *req_info;
131 	struct otx2_cpt_req_ctx *rctx;
132 	struct skcipher_request *sreq;
133 	struct crypto_skcipher *stfm;
134 	struct otx2_cpt_enc_ctx *ctx;
135 	u32 start, ivsize;
136 
137 	sreq = container_of(areq, struct skcipher_request, base);
138 	stfm = crypto_skcipher_reqtfm(sreq);
139 	ctx = crypto_skcipher_ctx(stfm);
140 	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
141 	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
142 		rctx = skcipher_request_ctx_dma(sreq);
143 		req_info = &rctx->cpt_req;
144 		ivsize = crypto_skcipher_ivsize(stfm);
145 		start = sreq->cryptlen - ivsize;
146 
147 		if (req_info->is_enc) {
148 			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
149 						 ivsize, 0);
150 		} else {
151 			if (sreq->src != sreq->dst) {
152 				scatterwalk_map_and_copy(sreq->iv, sreq->src,
153 							 start, ivsize, 0);
154 			} else {
155 				memcpy(sreq->iv, req_info->iv_out, ivsize);
156 				kfree(req_info->iv_out);
157 			}
158 		}
159 	}
160 }
161 
162 static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
163 {
164 	struct otx2_cpt_inst_info *inst_info = arg2;
165 	struct crypto_async_request *areq = arg1;
166 	struct pci_dev *pdev;
167 
168 	if (areq) {
169 		if (!status)
170 			output_iv_copyback(areq);
171 		if (inst_info) {
172 			pdev = inst_info->pdev;
173 			otx2_cpt_info_destroy(pdev, inst_info);
174 		}
175 		crypto_request_complete(areq, status);
176 	}
177 }
178 
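/*
 * Add each scatterlist segment of the source data to the CPT gather
 * (input) list and account for it in the request data length.
 */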
179 static inline void update_input_data(struct otx2_cpt_req_info *req_info,
180 				     struct scatterlist *inp_sg,
181 				     u32 nbytes, u32 *argcnt)
182 {
183 	req_info->req.dlen += nbytes;
184 
185 	while (nbytes) {
186 		u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
187 		u8 *ptr = sg_virt(inp_sg);
188 
189 		req_info->in[*argcnt].vptr = (void *)ptr;
190 		req_info->in[*argcnt].size = len;
191 		nbytes -= len;
192 		++(*argcnt);
193 		inp_sg = sg_next(inp_sg);
194 	}
195 }
196 
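/*
 * Add destination scatterlist segments to the CPT scatter (output) list,
 * starting at @offset within the first segment.
 */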
197 static inline void update_output_data(struct otx2_cpt_req_info *req_info,
198 				      struct scatterlist *outp_sg,
199 				      u32 offset, u32 nbytes, u32 *argcnt)
200 {
201 	u32 len, sg_len;
202 	u8 *ptr;
203 
204 	req_info->rlen += nbytes;
205 
206 	while (nbytes) {
207 		sg_len = outp_sg->length - offset;
208 		len = (nbytes < sg_len) ? nbytes : sg_len;
209 		ptr = sg_virt(outp_sg);
210 
211 		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
212 		req_info->out[*argcnt].size = len;
213 		nbytes -= len;
214 		++(*argcnt);
215 		offset = 0;
216 		outp_sg = sg_next(outp_sg);
217 	}
218 }
219 
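/*
 * Build the FLEXICRYPTO control word and context (cipher type, key, IV)
 * for an skcipher request and add them as the first two gather entries.
 * For in-place CBC decryption the last ciphertext block is saved first so
 * output_iv_copyback() can restore it as the output IV.
 */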
220 static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
221 				 u32 *argcnt)
222 {
223 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
224 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
225 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
226 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
227 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
228 	int ivsize = crypto_skcipher_ivsize(stfm);
229 	u32 start = req->cryptlen - ivsize;
230 	gfp_t flags;
231 
232 	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
233 			GFP_KERNEL : GFP_ATOMIC;
234 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
235 	req_info->ctrl.s.se_req = 1;
236 
237 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
238 				DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
239 	if (enc) {
240 		req_info->req.opcode.s.minor = 2;
241 	} else {
242 		req_info->req.opcode.s.minor = 3;
243 		if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
244 		    ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
245 		    req->src == req->dst) {
246 			req_info->iv_out = kmalloc(ivsize, flags);
247 			if (!req_info->iv_out)
248 				return -ENOMEM;
249 
250 			scatterwalk_map_and_copy(req_info->iv_out, req->src,
251 						 start, ivsize, 0);
252 		}
253 	}
254 	/* Encryption data length */
255 	req_info->req.param1 = req->cryptlen;
256 	/* Authentication data length */
257 	req_info->req.param2 = 0;
258 
259 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
260 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
261 	fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
262 
263 	if (ctx->cipher_type == OTX2_CPT_AES_XTS)
264 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
265 	else
266 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
267 
268 	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
269 
270 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
271 
272 	/*
273 	 * Store the packet data information in the offset control
274 	 * word, which forms the first 8 bytes of the input
275 	 */
276 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
277 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
278 	req_info->req.dlen += CONTROL_WORD_LEN;
279 	++(*argcnt);
280 
281 	req_info->in[*argcnt].vptr = (u8 *)fctx;
282 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
283 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
284 
285 	++(*argcnt);
286 
287 	return 0;
288 }
289 
290 static inline int create_input_list(struct skcipher_request *req, u32 enc,
291 				    u32 enc_iv_len)
292 {
293 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
294 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
295 	u32 argcnt =  0;
296 	int ret;
297 
298 	ret = create_ctx_hdr(req, enc, &argcnt);
299 	if (ret)
300 		return ret;
301 
302 	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
303 	req_info->in_cnt = argcnt;
304 
305 	return 0;
306 }
307 
308 static inline void create_output_list(struct skcipher_request *req,
309 				      u32 enc_iv_len)
310 {
311 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
312 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
313 	u32 argcnt = 0;
314 
315 	/*
316 	 * OUTPUT Buffer Processing
317 	 * AES encryption/decryption output is received
318 	 * in the following format:
319 	 *
320 	 * |-------IV-------|-----ENCRYPTED/DECRYPTED DATA------|
321 	 * |    16 bytes    | requested enc/dec length (AES CBC)|
322 	 */
323 	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
324 	req_info->out_cnt = argcnt;
325 }
326 
327 static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
328 {
329 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
330 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
331 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
332 	int ret;
333 
334 	if (ctx->fbk_cipher) {
335 		skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
336 		skcipher_request_set_callback(&rctx->sk_fbk_req,
337 					      req->base.flags,
338 					      req->base.complete,
339 					      req->base.data);
340 		skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
341 					   req->dst, req->cryptlen, req->iv);
342 		ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
343 			       crypto_skcipher_decrypt(&rctx->sk_fbk_req);
344 	} else {
345 		ret = -EINVAL;
346 	}
347 	return ret;
348 }
349 
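/*
 * Common skcipher encrypt/decrypt path: check length alignment, fall back
 * to software for oversized requests, build the input/output lists and
 * submit the instruction asynchronously to the selected CPT LF.
 */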
350 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
351 {
352 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
353 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
354 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
355 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
356 	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
357 	struct pci_dev *pdev;
358 	int status, cpu_num;
359 
360 	if (req->cryptlen == 0)
361 		return 0;
362 
363 	if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
364 		return -EINVAL;
365 
366 	if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
367 		return skcipher_do_fallback(req, enc);
368 
369 	/* Clear control words */
370 	rctx->ctrl_word.flags = 0;
371 	rctx->fctx.enc.enc_ctrl.u = 0;
372 
373 	status = create_input_list(req, enc, enc_iv_len);
374 	if (status)
375 		return status;
376 	create_output_list(req, enc_iv_len);
377 
378 	status = get_se_device(&pdev, &cpu_num);
379 	if (status)
380 		return status;
381 
382 	req_info->callback = otx2_cpt_skcipher_callback;
383 	req_info->areq = &req->base;
384 	req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
385 	req_info->is_enc = enc;
386 	req_info->is_trunc_hmac = false;
387 	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
388 
389 	req_info->req.cptr = ctx->er_ctx.hw_ctx;
390 	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
391 
392 	/*
393 	 * We perform an asynchronous send; once the request is
394 	 * completed the driver notifies us through the
395 	 * registered callback functions
396 	 */
397 	status = otx2_cpt_do_request(pdev, req_info, cpu_num);
398 
399 	return status;
400 }
401 
402 static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
403 {
404 	return cpt_enc_dec(req, true);
405 }
406 
407 static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
408 {
409 	return cpt_enc_dec(req, false);
410 }
411 
412 static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
413 				       const u8 *key, u32 keylen)
414 {
415 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
416 	const u8 *key2 = key + (keylen / 2);
417 	const u8 *key1 = key;
418 	int ret;
419 
420 	ret = xts_verify_key(tfm, key, keylen);
421 	if (ret)
422 		return ret;
423 	ctx->key_len = keylen;
424 	ctx->enc_align_len = 1;
425 	memcpy(ctx->enc_key, key1, keylen / 2);
426 	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
427 	ctx->cipher_type = OTX2_CPT_AES_XTS;
428 	switch (ctx->key_len) {
429 	case 2 * AES_KEYSIZE_128:
430 		ctx->key_type = OTX2_CPT_AES_128_BIT;
431 		break;
432 	case 2 * AES_KEYSIZE_192:
433 		ctx->key_type = OTX2_CPT_AES_192_BIT;
434 		break;
435 	case 2 * AES_KEYSIZE_256:
436 		ctx->key_type = OTX2_CPT_AES_256_BIT;
437 		break;
438 	default:
439 		return -EINVAL;
440 	}
441 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
442 }
443 
444 static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
445 			  u32 keylen, u8 cipher_type)
446 {
447 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
448 
449 	if (keylen != DES3_EDE_KEY_SIZE)
450 		return -EINVAL;
451 
452 	ctx->key_len = keylen;
453 	ctx->cipher_type = cipher_type;
454 	ctx->enc_align_len = 8;
455 
456 	memcpy(ctx->enc_key, key, keylen);
457 
458 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
459 }
460 
461 static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
462 			  u32 keylen, u8 cipher_type)
463 {
464 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
465 
466 	switch (keylen) {
467 	case AES_KEYSIZE_128:
468 		ctx->key_type = OTX2_CPT_AES_128_BIT;
469 		break;
470 	case AES_KEYSIZE_192:
471 		ctx->key_type = OTX2_CPT_AES_192_BIT;
472 		break;
473 	case AES_KEYSIZE_256:
474 		ctx->key_type = OTX2_CPT_AES_256_BIT;
475 		break;
476 	default:
477 		return -EINVAL;
478 	}
479 	if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
480 		ctx->enc_align_len = 16;
481 	else
482 		ctx->enc_align_len = 1;
483 
484 	ctx->key_len = keylen;
485 	ctx->cipher_type = cipher_type;
486 
487 	memcpy(ctx->enc_key, key, keylen);
488 
489 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
490 }
491 
492 static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
493 					    const u8 *key, u32 keylen)
494 {
495 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
496 }
497 
498 static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
499 					    const u8 *key, u32 keylen)
500 {
501 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
502 }
503 
504 static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
505 					     const u8 *key, u32 keylen)
506 {
507 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
508 }
509 
510 static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
511 					     const u8 *key, u32 keylen)
512 {
513 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
514 }
515 
516 static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
517 				      struct crypto_alg *alg)
518 {
519 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
520 		ctx->fbk_cipher =
521 				crypto_alloc_skcipher(alg->cra_name, 0,
522 						      CRYPTO_ALG_ASYNC |
523 						      CRYPTO_ALG_NEED_FALLBACK);
524 		if (IS_ERR(ctx->fbk_cipher)) {
525 			pr_err("%s() failed to allocate fallback for %s\n",
526 				__func__, alg->cra_name);
527 			return PTR_ERR(ctx->fbk_cipher);
528 		}
529 	}
530 	return 0;
531 }
532 
533 static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
534 {
535 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
536 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
537 	struct crypto_alg *alg = tfm->__crt_alg;
538 	struct pci_dev *pdev;
539 	int ret, cpu_num;
540 
541 	memset(ctx, 0, sizeof(*ctx));
542 	/*
543 	 * Additional memory for skcipher_request is
544 	 * allocated since the cryptd daemon uses
545 	 * this memory for request_ctx information
546 	 */
547 	crypto_skcipher_set_reqsize_dma(
548 		stfm, sizeof(struct otx2_cpt_req_ctx) +
549 		      sizeof(struct skcipher_request));
550 
551 	ret = get_se_device(&pdev, &cpu_num);
552 	if (ret)
553 		return ret;
554 
555 	ctx->pdev = pdev;
556 	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
557 	if (ret)
558 		return ret;
559 
560 	return cpt_skcipher_fallback_init(ctx, alg);
561 }
562 
563 static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
564 {
565 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
566 
567 	if (ctx->fbk_cipher) {
568 		crypto_free_skcipher(ctx->fbk_cipher);
569 		ctx->fbk_cipher = NULL;
570 	}
571 	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
572 }
573 
574 static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
575 				  struct crypto_alg *alg)
576 {
577 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
578 		ctx->fbk_cipher =
579 			    crypto_alloc_aead(alg->cra_name, 0,
580 					      CRYPTO_ALG_ASYNC |
581 					      CRYPTO_ALG_NEED_FALLBACK);
582 		if (IS_ERR(ctx->fbk_cipher)) {
583 			pr_err("%s() failed to allocate fallback for %s\n",
584 				__func__, alg->cra_name);
585 			return PTR_ERR(ctx->fbk_cipher);
586 		}
587 	}
588 	return 0;
589 }
590 
591 static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
592 {
593 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
594 	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
595 	struct crypto_alg *alg = tfm->__crt_alg;
596 	struct pci_dev *pdev;
597 	int ret, cpu_num;
598 
599 	ctx->cipher_type = cipher_type;
600 	ctx->mac_type = mac_type;
601 
602 	switch (ctx->mac_type) {
603 	case OTX2_CPT_SHA1:
604 		ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
605 		break;
606 
607 	case OTX2_CPT_SHA256:
608 		ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
609 		break;
610 
611 	case OTX2_CPT_SHA384:
612 		ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
613 		break;
614 
615 	case OTX2_CPT_SHA512:
616 		ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
617 		break;
618 	}
619 
620 	if (IS_ERR(ctx->hashalg))
621 		return PTR_ERR(ctx->hashalg);
622 
623 	if (ctx->hashalg) {
624 		ctx->sdesc = alloc_sdesc(ctx->hashalg);
625 		if (!ctx->sdesc) {
626 			crypto_free_shash(ctx->hashalg);
627 			return -ENOMEM;
628 		}
629 	}
630 
631 	/*
632 	 * When the selected cipher is NULL we use the HMAC opcode instead of
633 	 * the FLEXICRYPTO opcode, so we don't need the HASH algorithms
634 	 * for calculating ipad and opad
635 	 */
636 	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
637 		int ss = crypto_shash_statesize(ctx->hashalg);
638 
639 		ctx->ipad = kzalloc(ss, GFP_KERNEL);
640 		if (!ctx->ipad) {
641 			kfree(ctx->sdesc);
642 			crypto_free_shash(ctx->hashalg);
643 			return -ENOMEM;
644 		}
645 
646 		ctx->opad = kzalloc(ss, GFP_KERNEL);
647 		if (!ctx->opad) {
648 			kfree(ctx->ipad);
649 			kfree(ctx->sdesc);
650 			crypto_free_shash(ctx->hashalg);
651 			return -ENOMEM;
652 		}
653 	}
654 	switch (ctx->cipher_type) {
655 	case OTX2_CPT_AES_CBC:
656 	case OTX2_CPT_AES_ECB:
657 		ctx->enc_align_len = 16;
658 		break;
659 	case OTX2_CPT_DES3_CBC:
660 	case OTX2_CPT_DES3_ECB:
661 		ctx->enc_align_len = 8;
662 		break;
663 	case OTX2_CPT_AES_GCM:
664 	case OTX2_CPT_CIPHER_NULL:
665 		ctx->enc_align_len = 1;
666 		break;
667 	}
668 	crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
669 
670 	ret = get_se_device(&pdev, &cpu_num);
671 	if (ret)
672 		return ret;
673 
674 	ctx->pdev = pdev;
675 	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
676 	if (ret)
677 		return ret;
678 
679 	return cpt_aead_fallback_init(ctx, alg);
680 }
681 
682 static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
683 {
684 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
685 }
686 
687 static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
688 {
689 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
690 }
691 
692 static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
693 {
694 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
695 }
696 
697 static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
698 {
699 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
700 }
701 
702 static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
703 {
704 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
705 }
706 
707 static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
708 {
709 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
710 }
711 
712 static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
713 {
714 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
715 }
716 
717 static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
718 {
719 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
720 }
721 
722 static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
723 {
724 	return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
725 }
726 
727 static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
728 {
729 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
730 
731 	kfree(ctx->ipad);
732 	kfree(ctx->opad);
733 	crypto_free_shash(ctx->hashalg);
734 	kfree(ctx->sdesc);
735 
736 	if (ctx->fbk_cipher) {
737 		crypto_free_aead(ctx->fbk_cipher);
738 		ctx->fbk_cipher = NULL;
739 	}
740 	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
741 }
742 
743 static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
744 					  unsigned int authsize)
745 {
746 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
747 
748 	if (crypto_rfc4106_check_authsize(authsize))
749 		return -EINVAL;
750 
751 	tfm->authsize = authsize;
752 	/* Set authsize for fallback case */
753 	if (ctx->fbk_cipher)
754 		ctx->fbk_cipher->authsize = authsize;
755 
756 	return 0;
757 }
758 
759 static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
760 				      unsigned int authsize)
761 {
762 	tfm->authsize = authsize;
763 
764 	return 0;
765 }
766 
767 static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
768 					   unsigned int authsize)
769 {
770 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
771 
772 	ctx->is_trunc_hmac = true;
773 	tfm->authsize = authsize;
774 
775 	return 0;
776 }
777 
778 static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
779 {
780 	struct otx2_cpt_sdesc *sdesc;
781 	int size;
782 
783 	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
784 	sdesc = kmalloc(size, GFP_KERNEL);
785 	if (!sdesc)
786 		return NULL;
787 
788 	sdesc->shash.tfm = alg;
789 
790 	return sdesc;
791 }
792 
793 static inline void swap_data32(void *buf, u32 len)
794 {
795 	cpu_to_be32_array(buf, buf, len / 4);
796 }
797 
798 static inline void swap_data64(void *buf, u32 len)
799 {
800 	u64 *src = buf;
801 	int i = 0;
802 
803 	for (i = 0 ; i < len / 8; i++, src++)
804 		cpu_to_be64s(src);
805 }
806 
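/*
 * Byte-swap the exported partial hash state words to big endian before
 * they are copied into the ipad/opad context fields.
 */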
807 static int swap_pad(u8 mac_type, u8 *pad)
808 {
809 	struct sha512_state *sha512;
810 	struct sha256_state *sha256;
811 	struct sha1_state *sha1;
812 
813 	switch (mac_type) {
814 	case OTX2_CPT_SHA1:
815 		sha1 = (struct sha1_state *)pad;
816 		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
817 		break;
818 
819 	case OTX2_CPT_SHA256:
820 		sha256 = (struct sha256_state *)pad;
821 		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
822 		break;
823 
824 	case OTX2_CPT_SHA384:
825 	case OTX2_CPT_SHA512:
826 		sha512 = (struct sha512_state *)pad;
827 		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
828 		break;
829 
830 	default:
831 		return -EINVAL;
832 	}
833 
834 	return 0;
835 }
836 
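/*
 * Derive the HMAC ipad/opad partial hashes in software from the
 * authentication key and store the cipher key for the hardware context.
 */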
837 static int aead_hmac_init(struct crypto_aead *cipher,
838 			  struct crypto_authenc_keys *keys)
839 {
840 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
841 	int ds = crypto_shash_digestsize(ctx->hashalg);
842 	int bs = crypto_shash_blocksize(ctx->hashalg);
843 	int authkeylen = keys->authkeylen;
844 	u8 *ipad = NULL, *opad = NULL;
845 	int icount = 0;
846 	int ret;
847 
848 	if (authkeylen > bs) {
849 		ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
850 					  authkeylen, ctx->key);
851 		if (ret)
852 			goto calc_fail;
853 
854 		authkeylen = ds;
855 	} else
856 		memcpy(ctx->key, keys->authkey, authkeylen);
857 
858 	ctx->enc_key_len = keys->enckeylen;
859 	ctx->auth_key_len = authkeylen;
860 
861 	if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
862 		return keys->enckeylen ? -EINVAL : 0;
863 
864 	switch (keys->enckeylen) {
865 	case AES_KEYSIZE_128:
866 		ctx->key_type = OTX2_CPT_AES_128_BIT;
867 		break;
868 	case AES_KEYSIZE_192:
869 		ctx->key_type = OTX2_CPT_AES_192_BIT;
870 		break;
871 	case AES_KEYSIZE_256:
872 		ctx->key_type = OTX2_CPT_AES_256_BIT;
873 		break;
874 	default:
875 		/* Invalid key length */
876 		return -EINVAL;
877 	}
878 
879 	memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
880 
881 	ipad = ctx->ipad;
882 	opad = ctx->opad;
883 
884 	memcpy(ipad, ctx->key, authkeylen);
885 	memset(ipad + authkeylen, 0, bs - authkeylen);
886 	memcpy(opad, ipad, bs);
887 
888 	for (icount = 0; icount < bs; icount++) {
889 		ipad[icount] ^= 0x36;
890 		opad[icount] ^= 0x5c;
891 	}
892 
893 	/*
894 	 * The partial hash state computed by the software
895 	 * algorithm is exported for IPAD & OPAD
896 	 */
897 
898 	/* IPAD Calculation */
899 	crypto_shash_init(&ctx->sdesc->shash);
900 	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
901 	crypto_shash_export(&ctx->sdesc->shash, ipad);
902 	ret = swap_pad(ctx->mac_type, ipad);
903 	if (ret)
904 		goto calc_fail;
905 
906 	/* OPAD Calculation */
907 	crypto_shash_init(&ctx->sdesc->shash);
908 	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
909 	crypto_shash_export(&ctx->sdesc->shash, opad);
910 	ret = swap_pad(ctx->mac_type, opad);
911 
912 calc_fail:
913 	return ret;
914 }
915 
916 static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
917 					    const unsigned char *key,
918 					    unsigned int keylen)
919 {
920 	struct crypto_authenc_keys authenc_keys;
921 
922 	return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
923 	       aead_hmac_init(cipher, &authenc_keys);
924 }
925 
926 static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
927 					     const unsigned char *key,
928 					     unsigned int keylen)
929 {
930 	return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
931 }
932 
933 static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
934 					const unsigned char *key,
935 					unsigned int keylen)
936 {
937 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
938 
939 	/*
940 	 * For AES GCM we expect to get the encryption key (16, 24 or 32 bytes)
941 	 * and salt (4 bytes)
942 	 */
943 	switch (keylen) {
944 	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
945 		ctx->key_type = OTX2_CPT_AES_128_BIT;
946 		ctx->enc_key_len = AES_KEYSIZE_128;
947 		break;
948 	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
949 		ctx->key_type = OTX2_CPT_AES_192_BIT;
950 		ctx->enc_key_len = AES_KEYSIZE_192;
951 		break;
952 	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
953 		ctx->key_type = OTX2_CPT_AES_256_BIT;
954 		ctx->enc_key_len = AES_KEYSIZE_256;
955 		break;
956 	default:
957 		/* Invalid key and salt length */
958 		return -EINVAL;
959 	}
960 
961 	/* Store encryption key and salt */
962 	memcpy(ctx->key, key, keylen);
963 
964 	return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
965 }
966 
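/*
 * Build the FLEXICRYPTO context for AEAD requests: AES-CBC + HMAC uses
 * the precomputed ipad/opad, AES-GCM copies the 4-byte salt from the key
 * and takes the per-request IV from DPTR.
 */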
967 static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
968 				      u32 *argcnt)
969 {
970 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
971 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
972 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
973 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
974 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
975 	int mac_len = crypto_aead_authsize(tfm);
976 	int ds;
977 
978 	rctx->ctrl_word.e.enc_data_offset = req->assoclen;
979 
980 	switch (ctx->cipher_type) {
981 	case OTX2_CPT_AES_CBC:
982 		if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
983 			return -EINVAL;
984 
985 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
986 		/* Copy encryption key to context */
987 		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
988 		       ctx->enc_key_len);
989 		/* Copy IV to context */
990 		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
991 
992 		ds = crypto_shash_digestsize(ctx->hashalg);
993 		if (ctx->mac_type == OTX2_CPT_SHA384)
994 			ds = SHA512_DIGEST_SIZE;
995 		if (ctx->ipad)
996 			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
997 		if (ctx->opad)
998 			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
999 		break;
1000 
1001 	case OTX2_CPT_AES_GCM:
1002 		if (crypto_ipsec_check_assoclen(req->assoclen))
1003 			return -EINVAL;
1004 
1005 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
1006 		/* Copy encryption key to context */
1007 		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
1008 		/* Copy salt to context */
1009 		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
1010 		       AES_GCM_SALT_SIZE);
1011 
1012 		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
1013 		break;
1014 
1015 	default:
1016 		/* Unknown cipher type */
1017 		return -EINVAL;
1018 	}
1019 	cpu_to_be64s(&rctx->ctrl_word.flags);
1020 
1021 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
1022 	req_info->ctrl.s.se_req = 1;
1023 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
1024 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
1025 	if (enc) {
1026 		req_info->req.opcode.s.minor = 2;
1027 		req_info->req.param1 = req->cryptlen;
1028 		req_info->req.param2 = req->cryptlen + req->assoclen;
1029 	} else {
1030 		req_info->req.opcode.s.minor = 3;
1031 		req_info->req.param1 = req->cryptlen - mac_len;
1032 		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
1033 	}
1034 
1035 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
1036 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
1037 	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
1038 	fctx->enc.enc_ctrl.e.mac_len = mac_len;
1039 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
1040 
1041 	/*
1042 	 * Store the packet data information in the offset control
1043 	 * word, which forms the first 8 bytes of the input
1044 	 */
1045 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
1046 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
1047 	req_info->req.dlen += CONTROL_WORD_LEN;
1048 	++(*argcnt);
1049 
1050 	req_info->in[*argcnt].vptr = (u8 *)fctx;
1051 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
1052 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
1053 	++(*argcnt);
1054 
1055 	return 0;
1056 }
1057 
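/*
 * NULL-cipher (HMAC-only) requests use the raw HMAC opcode; only the
 * authentication key is placed in front of the data.
 */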
1058 static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
1059 				      u32 enc)
1060 {
1061 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1062 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1063 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1064 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1065 
1066 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
1067 	req_info->ctrl.s.se_req = 1;
1068 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
1069 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
1070 	req_info->is_trunc_hmac = ctx->is_trunc_hmac;
1071 
1072 	req_info->req.opcode.s.minor = 0;
1073 	req_info->req.param1 = ctx->auth_key_len;
1074 	req_info->req.param2 = ctx->mac_type << 8;
1075 
1076 	/* Add authentication key */
1077 	req_info->in[*argcnt].vptr = ctx->key;
1078 	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
1079 	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
1080 	++(*argcnt);
1081 }
1082 
1083 static inline int create_aead_input_list(struct aead_request *req, u32 enc)
1084 {
1085 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1086 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1087 	u32 inputlen =  req->cryptlen + req->assoclen;
1088 	u32 status, argcnt = 0;
1089 
1090 	status = create_aead_ctx_hdr(req, enc, &argcnt);
1091 	if (status)
1092 		return status;
1093 	update_input_data(req_info, req->src, inputlen, &argcnt);
1094 	req_info->in_cnt = argcnt;
1095 
1096 	return 0;
1097 }
1098 
1099 static inline void create_aead_output_list(struct aead_request *req, u32 enc,
1100 					   u32 mac_len)
1101 {
1102 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1103 	struct otx2_cpt_req_info *req_info =  &rctx->cpt_req;
1104 	u32 argcnt = 0, outputlen = 0;
1105 
1106 	if (enc)
1107 		outputlen = req->cryptlen +  req->assoclen + mac_len;
1108 	else
1109 		outputlen = req->cryptlen + req->assoclen - mac_len;
1110 
1111 	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
1112 	req_info->out_cnt = argcnt;
1113 }
1114 
1115 static inline void create_aead_null_input_list(struct aead_request *req,
1116 					       u32 enc, u32 mac_len)
1117 {
1118 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1119 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1120 	u32 inputlen, argcnt = 0;
1121 
1122 	if (enc)
1123 		inputlen =  req->cryptlen + req->assoclen;
1124 	else
1125 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1126 
1127 	create_hmac_ctx_hdr(req, &argcnt, enc);
1128 	update_input_data(req_info, req->src, inputlen, &argcnt);
1129 	req_info->in_cnt = argcnt;
1130 }
1131 
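/*
 * NULL-cipher output handling: the payload passes through unchanged.  On
 * encryption the calculated HMAC is written right after the payload in the
 * destination; on decryption the received HMAC is saved so the completion
 * callback can compare it with the calculated one.
 */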
1132 static inline int create_aead_null_output_list(struct aead_request *req,
1133 					       u32 enc, u32 mac_len)
1134 {
1135 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1136 	struct otx2_cpt_req_info *req_info =  &rctx->cpt_req;
1137 	struct scatterlist *dst;
1138 	u8 *ptr = NULL;
1139 	int argcnt = 0, status, offset;
1140 	u32 inputlen;
1141 
1142 	if (enc)
1143 		inputlen =  req->cryptlen + req->assoclen;
1144 	else
1145 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1146 
1147 	/*
1148 	 * If source and destination are different
1149 	 * then copy payload to destination
1150 	 */
1151 	if (req->src != req->dst) {
1152 
1153 		ptr = kmalloc(inputlen, (req_info->areq->flags &
1154 					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
1155 					 GFP_KERNEL : GFP_ATOMIC);
1156 		if (!ptr)
1157 			return -ENOMEM;
1158 
1159 		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
1160 					   inputlen);
1161 		if (status != inputlen) {
1162 			status = -EINVAL;
1163 			goto error_free;
1164 		}
1165 		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
1166 					     inputlen);
1167 		if (status != inputlen) {
1168 			status = -EINVAL;
1169 			goto error_free;
1170 		}
1171 		kfree(ptr);
1172 	}
1173 
1174 	if (enc) {
1175 		/*
1176 		 * In an encryption scenario the hmac needs
1177 		 * to be appended after the payload
1178 		 */
1179 		dst = req->dst;
1180 		offset = inputlen;
1181 		while (offset >= dst->length) {
1182 			offset -= dst->length;
1183 			dst = sg_next(dst);
1184 			if (!dst)
1185 				return -ENOENT;
1186 		}
1187 
1188 		update_output_data(req_info, dst, offset, mac_len, &argcnt);
1189 	} else {
1190 		/*
1191 		 * In a decryption scenario the hmac calculated over the received
1192 		 * payload needs to be compared with the received hmac
1193 		 */
1194 		status = sg_copy_buffer(req->src, sg_nents(req->src),
1195 					rctx->fctx.hmac.s.hmac_recv, mac_len,
1196 					inputlen, true);
1197 		if (status != mac_len)
1198 			return -EINVAL;
1199 
1200 		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
1201 		req_info->out[argcnt].size = mac_len;
1202 		argcnt++;
1203 	}
1204 
1205 	req_info->out_cnt = argcnt;
1206 	return 0;
1207 
1208 error_free:
1209 	kfree(ptr);
1210 	return status;
1211 }
1212 
1213 static int aead_do_fallback(struct aead_request *req, bool is_enc)
1214 {
1215 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1216 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1217 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
1218 	int ret;
1219 
1220 	if (ctx->fbk_cipher) {
1221 		/* Store the cipher tfm and then use the fallback tfm */
1222 		aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
1223 		aead_request_set_callback(&rctx->fbk_req, req->base.flags,
1224 					  req->base.complete, req->base.data);
1225 		aead_request_set_crypt(&rctx->fbk_req, req->src,
1226 				       req->dst, req->cryptlen, req->iv);
1227 		aead_request_set_ad(&rctx->fbk_req, req->assoclen);
1228 		ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
1229 			       crypto_aead_decrypt(&rctx->fbk_req);
1230 	} else {
1231 		ret = -EINVAL;
1232 	}
1233 
1234 	return ret;
1235 }
1236 
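/*
 * Common AEAD submit path: build the input/output lists for the regular or
 * NULL-cipher case, validate lengths (falling back to software where the
 * hardware limits are exceeded) and submit the instruction asynchronously.
 */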
1237 static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
1238 {
1239 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1240 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1241 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1242 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1243 	struct pci_dev *pdev;
1244 	int status, cpu_num;
1245 
1246 	/* Clear control words */
1247 	rctx->ctrl_word.flags = 0;
1248 	rctx->fctx.enc.enc_ctrl.u = 0;
1249 
1250 	req_info->callback = otx2_cpt_aead_callback;
1251 	req_info->areq = &req->base;
1252 	req_info->req_type = reg_type;
1253 	req_info->is_enc = enc;
1254 	req_info->is_trunc_hmac = false;
1255 
1256 	req_info->req.cptr = ctx->er_ctx.hw_ctx;
1257 	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
1258 
1259 	switch (reg_type) {
1260 	case OTX2_CPT_AEAD_ENC_DEC_REQ:
1261 		status = create_aead_input_list(req, enc);
1262 		if (status)
1263 			return status;
1264 		create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
1265 		break;
1266 
1267 	case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
1268 		create_aead_null_input_list(req, enc,
1269 					    crypto_aead_authsize(tfm));
1270 		status = create_aead_null_output_list(req, enc,
1271 						crypto_aead_authsize(tfm));
1272 		if (status)
1273 			return status;
1274 		break;
1275 
1276 	default:
1277 		return -EINVAL;
1278 	}
1279 	if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
1280 		return -EINVAL;
1281 
1282 	if (!req_info->req.param2 ||
1283 	    (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
1284 	    (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
1285 		return aead_do_fallback(req, enc);
1286 
1287 	status = get_se_device(&pdev, &cpu_num);
1288 	if (status)
1289 		return status;
1290 
1291 	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
1292 
1293 	/*
1294 	 * We perform an asynchronous send; once the request is
1295 	 * completed the driver notifies us through the
1296 	 * registered callback functions
1297 	 */
1298 	return otx2_cpt_do_request(pdev, req_info, cpu_num);
1299 }
1300 
1301 static int otx2_cpt_aead_encrypt(struct aead_request *req)
1302 {
1303 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
1304 }
1305 
1306 static int otx2_cpt_aead_decrypt(struct aead_request *req)
1307 {
1308 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
1309 }
1310 
1311 static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
1312 {
1313 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
1314 }
1315 
1316 static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
1317 {
1318 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
1319 }
1320 
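/*
 * The algorithms below are registered with the kernel crypto API by
 * cpt_register_algs().  A minimal usage sketch (assuming this driver wins
 * the cra_priority based selection for "cbc(aes)"):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, 0, done_cb, done_data);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);	may return -EINPROGRESS; completion
 *					is then signalled through done_cb
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 *
 * Error handling is omitted; done_cb, done_data, key, src_sg, dst_sg, len
 * and iv are placeholders supplied by the caller.
 */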
1321 static struct skcipher_alg otx2_cpt_skciphers[] = { {
1322 	.base.cra_name = "xts(aes)",
1323 	.base.cra_driver_name = "cpt_xts_aes",
1324 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1325 	.base.cra_blocksize = AES_BLOCK_SIZE,
1326 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1327 	.base.cra_alignmask = 7,
1328 	.base.cra_priority = 4001,
1329 	.base.cra_module = THIS_MODULE,
1330 
1331 	.init = otx2_cpt_enc_dec_init,
1332 	.exit = otx2_cpt_skcipher_exit,
1333 	.ivsize = AES_BLOCK_SIZE,
1334 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
1335 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
1336 	.setkey = otx2_cpt_skcipher_xts_setkey,
1337 	.encrypt = otx2_cpt_skcipher_encrypt,
1338 	.decrypt = otx2_cpt_skcipher_decrypt,
1339 }, {
1340 	.base.cra_name = "cbc(aes)",
1341 	.base.cra_driver_name = "cpt_cbc_aes",
1342 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1343 	.base.cra_blocksize = AES_BLOCK_SIZE,
1344 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1345 	.base.cra_alignmask = 7,
1346 	.base.cra_priority = 4001,
1347 	.base.cra_module = THIS_MODULE,
1348 
1349 	.init = otx2_cpt_enc_dec_init,
1350 	.exit = otx2_cpt_skcipher_exit,
1351 	.ivsize = AES_BLOCK_SIZE,
1352 	.min_keysize = AES_MIN_KEY_SIZE,
1353 	.max_keysize = AES_MAX_KEY_SIZE,
1354 	.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
1355 	.encrypt = otx2_cpt_skcipher_encrypt,
1356 	.decrypt = otx2_cpt_skcipher_decrypt,
1357 }, {
1358 	.base.cra_name = "ecb(aes)",
1359 	.base.cra_driver_name = "cpt_ecb_aes",
1360 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1361 	.base.cra_blocksize = AES_BLOCK_SIZE,
1362 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1363 	.base.cra_alignmask = 7,
1364 	.base.cra_priority = 4001,
1365 	.base.cra_module = THIS_MODULE,
1366 
1367 	.init = otx2_cpt_enc_dec_init,
1368 	.exit = otx2_cpt_skcipher_exit,
1369 	.ivsize = 0,
1370 	.min_keysize = AES_MIN_KEY_SIZE,
1371 	.max_keysize = AES_MAX_KEY_SIZE,
1372 	.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
1373 	.encrypt = otx2_cpt_skcipher_encrypt,
1374 	.decrypt = otx2_cpt_skcipher_decrypt,
1375 }, {
1376 	.base.cra_name = "cbc(des3_ede)",
1377 	.base.cra_driver_name = "cpt_cbc_des3_ede",
1378 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1379 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1380 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1381 	.base.cra_alignmask = 7,
1382 	.base.cra_priority = 4001,
1383 	.base.cra_module = THIS_MODULE,
1384 
1385 	.init = otx2_cpt_enc_dec_init,
1386 	.exit = otx2_cpt_skcipher_exit,
1387 	.min_keysize = DES3_EDE_KEY_SIZE,
1388 	.max_keysize = DES3_EDE_KEY_SIZE,
1389 	.ivsize = DES_BLOCK_SIZE,
1390 	.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
1391 	.encrypt = otx2_cpt_skcipher_encrypt,
1392 	.decrypt = otx2_cpt_skcipher_decrypt,
1393 }, {
1394 	.base.cra_name = "ecb(des3_ede)",
1395 	.base.cra_driver_name = "cpt_ecb_des3_ede",
1396 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1397 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1398 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1399 	.base.cra_alignmask = 7,
1400 	.base.cra_priority = 4001,
1401 	.base.cra_module = THIS_MODULE,
1402 
1403 	.init = otx2_cpt_enc_dec_init,
1404 	.exit = otx2_cpt_skcipher_exit,
1405 	.min_keysize = DES3_EDE_KEY_SIZE,
1406 	.max_keysize = DES3_EDE_KEY_SIZE,
1407 	.ivsize = 0,
1408 	.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
1409 	.encrypt = otx2_cpt_skcipher_encrypt,
1410 	.decrypt = otx2_cpt_skcipher_decrypt,
1411 } };
1412 
1413 static struct aead_alg otx2_cpt_aeads[] = { {
1414 	.base = {
1415 		.cra_name = "authenc(hmac(sha1),cbc(aes))",
1416 		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
1417 		.cra_blocksize = AES_BLOCK_SIZE,
1418 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1419 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1420 		.cra_priority = 4001,
1421 		.cra_alignmask = 0,
1422 		.cra_module = THIS_MODULE,
1423 	},
1424 	.init = otx2_cpt_aead_cbc_aes_sha1_init,
1425 	.exit = otx2_cpt_aead_exit,
1426 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1427 	.setauthsize = otx2_cpt_aead_set_authsize,
1428 	.encrypt = otx2_cpt_aead_encrypt,
1429 	.decrypt = otx2_cpt_aead_decrypt,
1430 	.ivsize = AES_BLOCK_SIZE,
1431 	.maxauthsize = SHA1_DIGEST_SIZE,
1432 }, {
1433 	.base = {
1434 		.cra_name = "authenc(hmac(sha256),cbc(aes))",
1435 		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
1436 		.cra_blocksize = AES_BLOCK_SIZE,
1437 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1438 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1439 		.cra_priority = 4001,
1440 		.cra_alignmask = 0,
1441 		.cra_module = THIS_MODULE,
1442 	},
1443 	.init = otx2_cpt_aead_cbc_aes_sha256_init,
1444 	.exit = otx2_cpt_aead_exit,
1445 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1446 	.setauthsize = otx2_cpt_aead_set_authsize,
1447 	.encrypt = otx2_cpt_aead_encrypt,
1448 	.decrypt = otx2_cpt_aead_decrypt,
1449 	.ivsize = AES_BLOCK_SIZE,
1450 	.maxauthsize = SHA256_DIGEST_SIZE,
1451 }, {
1452 	.base = {
1453 		.cra_name = "authenc(hmac(sha384),cbc(aes))",
1454 		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
1455 		.cra_blocksize = AES_BLOCK_SIZE,
1456 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1457 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1458 		.cra_priority = 4001,
1459 		.cra_alignmask = 0,
1460 		.cra_module = THIS_MODULE,
1461 	},
1462 	.init = otx2_cpt_aead_cbc_aes_sha384_init,
1463 	.exit = otx2_cpt_aead_exit,
1464 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1465 	.setauthsize = otx2_cpt_aead_set_authsize,
1466 	.encrypt = otx2_cpt_aead_encrypt,
1467 	.decrypt = otx2_cpt_aead_decrypt,
1468 	.ivsize = AES_BLOCK_SIZE,
1469 	.maxauthsize = SHA384_DIGEST_SIZE,
1470 }, {
1471 	.base = {
1472 		.cra_name = "authenc(hmac(sha512),cbc(aes))",
1473 		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
1474 		.cra_blocksize = AES_BLOCK_SIZE,
1475 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1476 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1477 		.cra_priority = 4001,
1478 		.cra_alignmask = 0,
1479 		.cra_module = THIS_MODULE,
1480 	},
1481 	.init = otx2_cpt_aead_cbc_aes_sha512_init,
1482 	.exit = otx2_cpt_aead_exit,
1483 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1484 	.setauthsize = otx2_cpt_aead_set_authsize,
1485 	.encrypt = otx2_cpt_aead_encrypt,
1486 	.decrypt = otx2_cpt_aead_decrypt,
1487 	.ivsize = AES_BLOCK_SIZE,
1488 	.maxauthsize = SHA512_DIGEST_SIZE,
1489 }, {
1490 	.base = {
1491 		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
1492 		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
1493 		.cra_blocksize = 1,
1494 		.cra_flags = CRYPTO_ALG_ASYNC,
1495 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1496 		.cra_priority = 4001,
1497 		.cra_alignmask = 0,
1498 		.cra_module = THIS_MODULE,
1499 	},
1500 	.init = otx2_cpt_aead_ecb_null_sha1_init,
1501 	.exit = otx2_cpt_aead_exit,
1502 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1503 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1504 	.encrypt = otx2_cpt_aead_null_encrypt,
1505 	.decrypt = otx2_cpt_aead_null_decrypt,
1506 	.ivsize = 0,
1507 	.maxauthsize = SHA1_DIGEST_SIZE,
1508 }, {
1509 	.base = {
1510 		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
1511 		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
1512 		.cra_blocksize = 1,
1513 		.cra_flags = CRYPTO_ALG_ASYNC,
1514 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1515 		.cra_priority = 4001,
1516 		.cra_alignmask = 0,
1517 		.cra_module = THIS_MODULE,
1518 	},
1519 	.init = otx2_cpt_aead_ecb_null_sha256_init,
1520 	.exit = otx2_cpt_aead_exit,
1521 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1522 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1523 	.encrypt = otx2_cpt_aead_null_encrypt,
1524 	.decrypt = otx2_cpt_aead_null_decrypt,
1525 	.ivsize = 0,
1526 	.maxauthsize = SHA256_DIGEST_SIZE,
1527 }, {
1528 	.base = {
1529 		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
1530 		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
1531 		.cra_blocksize = 1,
1532 		.cra_flags = CRYPTO_ALG_ASYNC,
1533 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1534 		.cra_priority = 4001,
1535 		.cra_alignmask = 0,
1536 		.cra_module = THIS_MODULE,
1537 	},
1538 	.init = otx2_cpt_aead_ecb_null_sha384_init,
1539 	.exit = otx2_cpt_aead_exit,
1540 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1541 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1542 	.encrypt = otx2_cpt_aead_null_encrypt,
1543 	.decrypt = otx2_cpt_aead_null_decrypt,
1544 	.ivsize = 0,
1545 	.maxauthsize = SHA384_DIGEST_SIZE,
1546 }, {
1547 	.base = {
1548 		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
1549 		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
1550 		.cra_blocksize = 1,
1551 		.cra_flags = CRYPTO_ALG_ASYNC,
1552 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1553 		.cra_priority = 4001,
1554 		.cra_alignmask = 0,
1555 		.cra_module = THIS_MODULE,
1556 	},
1557 	.init = otx2_cpt_aead_ecb_null_sha512_init,
1558 	.exit = otx2_cpt_aead_exit,
1559 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1560 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1561 	.encrypt = otx2_cpt_aead_null_encrypt,
1562 	.decrypt = otx2_cpt_aead_null_decrypt,
1563 	.ivsize = 0,
1564 	.maxauthsize = SHA512_DIGEST_SIZE,
1565 }, {
1566 	.base = {
1567 		.cra_name = "rfc4106(gcm(aes))",
1568 		.cra_driver_name = "cpt_rfc4106_gcm_aes",
1569 		.cra_blocksize = 1,
1570 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1571 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1572 		.cra_priority = 4001,
1573 		.cra_alignmask = 0,
1574 		.cra_module = THIS_MODULE,
1575 	},
1576 	.init = otx2_cpt_aead_gcm_aes_init,
1577 	.exit = otx2_cpt_aead_exit,
1578 	.setkey = otx2_cpt_aead_gcm_aes_setkey,
1579 	.setauthsize = otx2_cpt_aead_gcm_set_authsize,
1580 	.encrypt = otx2_cpt_aead_encrypt,
1581 	.decrypt = otx2_cpt_aead_decrypt,
1582 	.ivsize = AES_GCM_IV_SIZE,
1583 	.maxauthsize = AES_GCM_ICV_SIZE,
1584 } };
1585 
1586 static inline int cpt_register_algs(void)
1587 {
1588 	int i, err = 0;
1589 
1590 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
1591 		otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1592 
1593 	err = crypto_register_skciphers(otx2_cpt_skciphers,
1594 					ARRAY_SIZE(otx2_cpt_skciphers));
1595 	if (err)
1596 		return err;
1597 
1598 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
1599 		otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1600 
1601 	err = crypto_register_aeads(otx2_cpt_aeads,
1602 				    ARRAY_SIZE(otx2_cpt_aeads));
1603 	if (err) {
1604 		crypto_unregister_skciphers(otx2_cpt_skciphers,
1605 					    ARRAY_SIZE(otx2_cpt_skciphers));
1606 		return err;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 static inline void cpt_unregister_algs(void)
1613 {
1614 	crypto_unregister_skciphers(otx2_cpt_skciphers,
1615 				    ARRAY_SIZE(otx2_cpt_skciphers));
1616 	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
1617 }
1618 
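/* sort() comparison helper: order device descriptors by PCI devfn. */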
1619 static int compare_func(const void *lptr, const void *rptr)
1620 {
1621 	const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
1622 	const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
1623 
1624 	if (ldesc->dev->devfn < rdesc->dev->devfn)
1625 		return -1;
1626 	if (ldesc->dev->devfn > rdesc->dev->devfn)
1627 		return 1;
1628 	return 0;
1629 }
1630 
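/*
 * Called for each SE CPT device coming up.  Once all expected devices are
 * present the skcipher and AEAD algorithms are registered exactly once.
 */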
1631 int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
1632 			 int num_queues, int num_devices)
1633 {
1634 	int ret = 0;
1635 	int count;
1636 
1637 	mutex_lock(&mutex);
1638 	count = atomic_read(&se_devices.count);
1639 	if (count >= OTX2_CPT_MAX_LFS_NUM) {
1640 		dev_err(&pdev->dev, "No space to add a new device\n");
1641 		ret = -ENOSPC;
1642 		goto unlock;
1643 	}
1644 	se_devices.desc[count].num_queues = num_queues;
1645 	se_devices.desc[count++].dev = pdev;
1646 	atomic_inc(&se_devices.count);
1647 
1648 	if (atomic_read(&se_devices.count) == num_devices &&
1649 	    is_crypto_registered == false) {
1650 		if (cpt_register_algs()) {
1651 			dev_err(&pdev->dev,
1652 				"Error in registering crypto algorithms\n");
1653 			ret =  -EINVAL;
1654 			goto unlock;
1655 		}
1656 		try_module_get(mod);
1657 		is_crypto_registered = true;
1658 	}
1659 	sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
1660 	     compare_func, NULL);
1661 
1662 unlock:
1663 	mutex_unlock(&mutex);
1664 	return ret;
1665 }
1666 
1667 void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
1668 {
1669 	struct cpt_device_table *dev_tbl;
1670 	bool dev_found = false;
1671 	int i, j, count;
1672 
1673 	mutex_lock(&mutex);
1674 
1675 	dev_tbl = &se_devices;
1676 	count = atomic_read(&dev_tbl->count);
1677 	for (i = 0; i < count; i++) {
1678 		if (pdev == dev_tbl->desc[i].dev) {
1679 			for (j = i; j < count-1; j++)
1680 				dev_tbl->desc[j] = dev_tbl->desc[j+1];
1681 			dev_found = true;
1682 			break;
1683 		}
1684 	}
1685 
1686 	if (!dev_found) {
1687 		dev_err(&pdev->dev, "%s device not found\n", __func__);
1688 		goto unlock;
1689 	}
1690 	if (atomic_dec_and_test(&se_devices.count)) {
1691 		cpt_unregister_algs();
1692 		module_put(mod);
1693 		is_crypto_registered = false;
1694 	}
1695 
1696 unlock:
1697 	mutex_unlock(&mutex);
1698 }
1699