xref: /linux/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3 
4 #include <linux/ctype.h>
5 #include <linux/firmware.h>
6 #include <linux/string_choices.h>
7 #include "otx2_cptpf_ucode.h"
8 #include "otx2_cpt_common.h"
9 #include "otx2_cptpf.h"
10 #include "otx2_cptlf.h"
11 #include "otx2_cpt_reqmgr.h"
12 #include "rvu_reg.h"
13 
14 #define CSR_DELAY 30
15 
16 #define LOADFVC_RLEN 8
17 #define LOADFVC_MAJOR_OP 0x01
18 #define LOADFVC_MINOR_OP 0x08
19 
20 /*
21  * Interval to flush dirty data for the next CTX entry. The interval is
22  * measured in increments of 10 ns (interval time = CTX_FLUSH_TIMER_CNT * 10 ns).
23  */
24 #define CTX_FLUSH_TIMER_CNT 0x2FAF0
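/*
 * For reference: 0x2FAF0 == 195312, so with the 10 ns granularity noted
 * above the programmed interval works out to 195312 * 10 ns ~= 1.95 ms
 * (arithmetic inferred from the comment above, not taken from a datasheet).
 */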
25 
26 struct fw_info_t {
27 	struct list_head ucodes;
28 };
29 
30 static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
31 					struct otx2_cpt_eng_grp_info *eng_grp)
32 {
33 	struct otx2_cpt_bitmap bmap = { {0} };
34 	bool found = false;
35 	int i;
36 
37 	if (eng_grp->g->engs_num < 0 ||
38 	    eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
39 		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
40 			eng_grp->g->engs_num);
41 		return bmap;
42 	}
43 
44 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
45 		if (eng_grp->engs[i].type) {
46 			bitmap_or(bmap.bits, bmap.bits,
47 				  eng_grp->engs[i].bmap,
48 				  eng_grp->g->engs_num);
49 			bmap.size = eng_grp->g->engs_num;
50 			found = true;
51 		}
52 	}
53 
54 	if (!found)
55 		dev_err(dev, "No engines reserved for engine group %d\n",
56 			eng_grp->idx);
57 	return bmap;
58 }
59 
60 static int is_eng_type(int val, int eng_type)
61 {
62 	return val & (1 << eng_type);
63 }
64 
65 static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
66 {
67 	if (eng_grp->ucode[1].type)
68 		return true;
69 	else
70 		return false;
71 }
72 
73 static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
74 			       const char *filename)
75 {
76 	strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
77 }
78 
79 static char *get_eng_type_str(int eng_type)
80 {
81 	char *str = "unknown";
82 
83 	switch (eng_type) {
84 	case OTX2_CPT_SE_TYPES:
85 		str = "SE";
86 		break;
87 
88 	case OTX2_CPT_IE_TYPES:
89 		str = "IE";
90 		break;
91 
92 	case OTX2_CPT_AE_TYPES:
93 		str = "AE";
94 		break;
95 	}
96 	return str;
97 }
98 
99 static char *get_ucode_type_str(int ucode_type)
100 {
101 	char *str = "unknown";
102 
103 	switch (ucode_type) {
104 	case (1 << OTX2_CPT_SE_TYPES):
105 		str = "SE";
106 		break;
107 
108 	case (1 << OTX2_CPT_IE_TYPES):
109 		str = "IE";
110 		break;
111 
112 	case (1 << OTX2_CPT_AE_TYPES):
113 		str = "AE";
114 		break;
115 
116 	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
117 		str = "SE+IPSEC";
118 		break;
119 	}
120 	return str;
121 }
122 
123 static int get_ucode_type(struct device *dev,
124 			  struct otx2_cpt_ucode_hdr *ucode_hdr,
125 			  int *ucode_type, u16 rid)
126 {
127 	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
128 	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
129 	int i, val = 0;
130 	u8 nn;
131 
132 	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
133 	for (i = 0; i < strlen(tmp_ver_str); i++)
134 		tmp_ver_str[i] = tolower(tmp_ver_str[i]);
135 
136 	sprintf(ver_str_prefix, "ocpt-%02d", rid);
137 	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
138 		return -EINVAL;
139 
140 	nn = ucode_hdr->ver_num.nn;
141 	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
142 	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
143 	     nn == OTX2_CPT_SE_UC_TYPE3))
144 		val |= 1 << OTX2_CPT_SE_TYPES;
145 	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
146 	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
147 	     nn == OTX2_CPT_IE_UC_TYPE3))
148 		val |= 1 << OTX2_CPT_IE_TYPES;
149 	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
150 	    nn == OTX2_CPT_AE_UC_TYPE)
151 		val |= 1 << OTX2_CPT_AE_TYPES;
152 
153 	*ucode_type = val;
154 
155 	if (!val)
156 		return -EINVAL;
157 
158 	return 0;
159 }
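/*
 * Illustration of the checks above with a hypothetical version string:
 * with rid == 9, a lowercased ver_str such as "ocpt-09,se-ucode,v1.0"
 * passes the "ocpt-09" tag check (strnstr() matches anywhere in the
 * string), matches the "se-" substring and, provided ver_num.nn is one of
 * the OTX2_CPT_SE_UC_TYPE* values, yields *ucode_type = 1 << OTX2_CPT_SE_TYPES.
 */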
160 
161 static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
162 			      dma_addr_t dma_addr, int blkaddr)
163 {
164 	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
165 				     CPT_AF_EXEX_UCODE_BASE(eng),
166 				     (u64)dma_addr, blkaddr);
167 }
168 
169 static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
170 			       struct otx2_cptpf_dev *cptpf, int blkaddr)
171 {
172 	struct otx2_cpt_engs_rsvd *engs;
173 	dma_addr_t dma_addr;
174 	int i, bit, ret;
175 
176 	/* Set PF number for microcode fetches */
177 	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
178 				    CPT_AF_PF_FUNC,
179 				    rvu_make_pcifunc(cptpf->pdev,
180 						     cptpf->pf_id, 0),
181 				    blkaddr);
182 	if (ret)
183 		return ret;
184 
185 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
186 		engs = &eng_grp->engs[i];
187 		if (!engs->type)
188 			continue;
189 
190 		dma_addr = engs->ucode->dma;
191 
192 		/*
193 		 * Set UCODE_BASE only for the cores which are not used;
194 		 * the other cores should already have a valid UCODE_BASE set.
195 		 */
196 		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
197 			if (!eng_grp->g->eng_ref_cnt[bit]) {
198 				ret = __write_ucode_base(cptpf, bit, dma_addr,
199 							 blkaddr);
200 				if (ret)
201 					return ret;
202 			}
203 	}
204 	return 0;
205 }
206 
207 static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
208 {
209 	struct otx2_cptpf_dev *cptpf = obj;
210 	int ret;
211 
212 	if (cptpf->has_cpt1) {
213 		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
214 		if (ret)
215 			return ret;
216 	}
217 	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
218 }
219 
220 static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
221 					 struct otx2_cptpf_dev *cptpf,
222 					 struct otx2_cpt_bitmap bmap,
223 					 int blkaddr)
224 {
225 	int i, timeout = 10;
226 	int busy, ret;
227 	u64 reg = 0;
228 
229 	/* Detach the cores from group */
230 	for_each_set_bit(i, bmap.bits, bmap.size) {
231 		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
232 					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
233 		if (ret)
234 			return ret;
235 
236 		if (reg & (1ull << eng_grp->idx)) {
237 			eng_grp->g->eng_ref_cnt[i]--;
238 			reg &= ~(1ull << eng_grp->idx);
239 
240 			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
241 						    cptpf->pdev,
242 						    CPT_AF_EXEX_CTL2(i), reg,
243 						    blkaddr);
244 			if (ret)
245 				return ret;
246 		}
247 	}
248 
249 	/* Wait for cores to become idle */
250 	do {
251 		busy = 0;
252 		usleep_range(10000, 20000);
253 		if (timeout-- < 0)
254 			return -EBUSY;
255 
256 		for_each_set_bit(i, bmap.bits, bmap.size) {
257 			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
258 						   cptpf->pdev,
259 						   CPT_AF_EXEX_STS(i), &reg,
260 						   blkaddr);
261 			if (ret)
262 				return ret;
263 
264 			if (reg & 0x1) {
265 				busy = 1;
266 				break;
267 			}
268 		}
269 	} while (busy);
270 
271 	/* Disable the cores only if they are no longer used */
272 	for_each_set_bit(i, bmap.bits, bmap.size) {
273 		if (!eng_grp->g->eng_ref_cnt[i]) {
274 			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
275 						    cptpf->pdev,
276 						    CPT_AF_EXEX_CTL(i), 0x0,
277 						    blkaddr);
278 			if (ret)
279 				return ret;
280 		}
281 	}
282 
283 	return 0;
284 }
285 
286 static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
287 					void *obj)
288 {
289 	struct otx2_cptpf_dev *cptpf = obj;
290 	struct otx2_cpt_bitmap bmap;
291 	int ret;
292 
293 	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
294 	if (!bmap.size)
295 		return -EINVAL;
296 
297 	if (cptpf->has_cpt1) {
298 		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
299 						    BLKADDR_CPT1);
300 		if (ret)
301 			return ret;
302 	}
303 	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
304 					     BLKADDR_CPT0);
305 }
306 
307 static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
308 					struct otx2_cptpf_dev *cptpf,
309 					struct otx2_cpt_bitmap bmap,
310 					int blkaddr)
311 {
312 	u64 reg = 0;
313 	int i, ret;
314 
315 	/* Attach the cores to the group */
316 	for_each_set_bit(i, bmap.bits, bmap.size) {
317 		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
318 					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
319 		if (ret)
320 			return ret;
321 
322 		if (!(reg & (1ull << eng_grp->idx))) {
323 			eng_grp->g->eng_ref_cnt[i]++;
324 			reg |= 1ull << eng_grp->idx;
325 
326 			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
327 						    cptpf->pdev,
328 						    CPT_AF_EXEX_CTL2(i), reg,
329 						    blkaddr);
330 			if (ret)
331 				return ret;
332 		}
333 	}
334 
335 	/* Enable the cores */
336 	for_each_set_bit(i, bmap.bits, bmap.size) {
337 		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
338 						CPT_AF_EXEX_CTL(i), 0x1,
339 						blkaddr);
340 		if (ret)
341 			return ret;
342 	}
343 	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
344 }
345 
346 static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
347 				       void *obj)
348 {
349 	struct otx2_cptpf_dev *cptpf = obj;
350 	struct otx2_cpt_bitmap bmap;
351 	int ret;
352 
353 	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
354 	if (!bmap.size)
355 		return -EINVAL;
356 
357 	if (cptpf->has_cpt1) {
358 		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
359 						   BLKADDR_CPT1);
360 		if (ret)
361 			return ret;
362 	}
363 	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
364 }
365 
366 static int load_fw(struct device *dev, struct fw_info_t *fw_info,
367 		   char *filename, u16 rid)
368 {
369 	struct otx2_cpt_ucode_hdr *ucode_hdr;
370 	struct otx2_cpt_uc_info_t *uc_info;
371 	int ucode_type, ucode_size;
372 	int ret;
373 
374 	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
375 	if (!uc_info)
376 		return -ENOMEM;
377 
378 	ret = request_firmware(&uc_info->fw, filename, dev);
379 	if (ret)
380 		goto free_uc_info;
381 
382 	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
383 	ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
384 	if (ret)
385 		goto release_fw;
386 
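	/*
	 * code_length is stored big-endian in the image header; the "* 2"
	 * below suggests it counts 16-bit words, so the byte size is twice
	 * that (an inference from this code, not a documented header format).
	 */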
387 	ucode_size = ntohl(ucode_hdr->code_length) * 2;
388 	if (!ucode_size) {
389 		dev_err(dev, "Ucode %s has invalid size\n", filename);
390 		ret = -EINVAL;
391 		goto release_fw;
392 	}
393 
394 	set_ucode_filename(&uc_info->ucode, filename);
395 	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
396 	       OTX2_CPT_UCODE_VER_STR_SZ);
397 	uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
398 	uc_info->ucode.ver_num = ucode_hdr->ver_num;
399 	uc_info->ucode.type = ucode_type;
400 	uc_info->ucode.size = ucode_size;
401 	list_add_tail(&uc_info->list, &fw_info->ucodes);
402 
403 	return 0;
404 
405 release_fw:
406 	release_firmware(uc_info->fw);
407 free_uc_info:
408 	kfree(uc_info);
409 	return ret;
410 }
411 
412 static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
413 {
414 	struct otx2_cpt_uc_info_t *curr, *temp;
415 
416 	if (!fw_info)
417 		return;
418 
419 	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
420 		list_del(&curr->list);
421 		release_firmware(curr->fw);
422 		kfree(curr);
423 	}
424 }
425 
426 static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
427 					    int ucode_type)
428 {
429 	struct otx2_cpt_uc_info_t *curr;
430 
431 	list_for_each_entry(curr, &fw_info->ucodes, list) {
432 		if (!is_eng_type(curr->ucode.type, ucode_type))
433 			continue;
434 
435 		return curr;
436 	}
437 	return NULL;
438 }
439 
440 static void print_uc_info(struct fw_info_t *fw_info)
441 {
442 	struct otx2_cpt_uc_info_t *curr;
443 
444 	list_for_each_entry(curr, &fw_info->ucodes, list) {
445 		pr_debug("Ucode filename %s\n", curr->ucode.filename);
446 		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
447 		pr_debug("Ucode version %d.%d.%d.%d\n",
448 			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
449 			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
450 		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
451 			 get_ucode_type_str(curr->ucode.type));
452 		pr_debug("Ucode size %d\n", curr->ucode.size);
453 		pr_debug("Ucode ptr %p\n", curr->fw->data);
454 	}
455 }
456 
457 static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
458 			     u16 rid)
459 {
460 	char filename[OTX2_CPT_NAME_LENGTH];
461 	char eng_type[8] = {0};
462 	int ret, e, i;
463 
464 	INIT_LIST_HEAD(&fw_info->ucodes);
465 
466 	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
467 		strcpy(eng_type, get_eng_type_str(e));
468 		for (i = 0; i < strlen(eng_type); i++)
469 			eng_type[i] = tolower(eng_type[i]);
470 
471 		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
472 			 rid, eng_type);
473 		/* Request firmware for each engine type */
474 		ret = load_fw(&pdev->dev, fw_info, filename, rid);
475 		if (ret)
476 			goto release_fw;
477 	}
478 	print_uc_info(fw_info);
479 	return 0;
480 
481 release_fw:
482 	cpt_ucode_release_fw(fw_info);
483 	return ret;
484 }
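/*
 * For illustration (assuming the engine-type enum runs SE, IE, AE): with
 * rid == 1 the loop above requests "mrvl/cpt01/se.out", "mrvl/cpt01/ie.out"
 * and "mrvl/cpt01/ae.out", one image per engine type, per the snprintf()
 * format string.
 */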
485 
486 struct otx2_cpt_engs_rsvd *find_engines_by_type(
487 					struct otx2_cpt_eng_grp_info *eng_grp,
488 					int eng_type)
489 {
490 	int i;
491 
492 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
493 		if (!eng_grp->engs[i].type)
494 			continue;
495 
496 		if (eng_grp->engs[i].type == eng_type)
497 			return &eng_grp->engs[i];
498 	}
499 	return NULL;
500 }
501 
502 static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
503 				int eng_type)
504 {
505 	struct otx2_cpt_engs_rsvd *engs;
506 
507 	engs = find_engines_by_type(eng_grp, eng_type);
508 
509 	return (engs != NULL ? 1 : 0);
510 }
511 
512 static int update_engines_avail_count(struct device *dev,
513 				      struct otx2_cpt_engs_available *avail,
514 				      struct otx2_cpt_engs_rsvd *engs, int val)
515 {
516 	switch (engs->type) {
517 	case OTX2_CPT_SE_TYPES:
518 		avail->se_cnt += val;
519 		break;
520 
521 	case OTX2_CPT_IE_TYPES:
522 		avail->ie_cnt += val;
523 		break;
524 
525 	case OTX2_CPT_AE_TYPES:
526 		avail->ae_cnt += val;
527 		break;
528 
529 	default:
530 		dev_err(dev, "Invalid engine type %d\n", engs->type);
531 		return -EINVAL;
532 	}
533 	return 0;
534 }
535 
536 static int update_engines_offset(struct device *dev,
537 				 struct otx2_cpt_engs_available *avail,
538 				 struct otx2_cpt_engs_rsvd *engs)
539 {
540 	switch (engs->type) {
541 	case OTX2_CPT_SE_TYPES:
542 		engs->offset = 0;
543 		break;
544 
545 	case OTX2_CPT_IE_TYPES:
546 		engs->offset = avail->max_se_cnt;
547 		break;
548 
549 	case OTX2_CPT_AE_TYPES:
550 		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
551 		break;
552 
553 	default:
554 		dev_err(dev, "Invalid engine type %d\n", engs->type);
555 		return -EINVAL;
556 	}
557 	return 0;
558 }
559 
560 static int release_engines(struct device *dev,
561 			   struct otx2_cpt_eng_grp_info *grp)
562 {
563 	int i, ret = 0;
564 
565 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
566 		if (!grp->engs[i].type)
567 			continue;
568 
569 		if (grp->engs[i].count > 0) {
570 			ret = update_engines_avail_count(dev, &grp->g->avail,
571 							 &grp->engs[i],
572 							 grp->engs[i].count);
573 			if (ret)
574 				return ret;
575 		}
576 
577 		grp->engs[i].type = 0;
578 		grp->engs[i].count = 0;
579 		grp->engs[i].offset = 0;
580 		grp->engs[i].ucode = NULL;
581 		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
582 	}
583 	return 0;
584 }
585 
586 static int do_reserve_engines(struct device *dev,
587 			      struct otx2_cpt_eng_grp_info *grp,
588 			      struct otx2_cpt_engines *req_engs)
589 {
590 	struct otx2_cpt_engs_rsvd *engs = NULL;
591 	int i, ret;
592 
593 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
594 		if (!grp->engs[i].type) {
595 			engs = &grp->engs[i];
596 			break;
597 		}
598 	}
599 
600 	if (!engs)
601 		return -ENOMEM;
602 
603 	engs->type = req_engs->type;
604 	engs->count = req_engs->count;
605 
606 	ret = update_engines_offset(dev, &grp->g->avail, engs);
607 	if (ret)
608 		return ret;
609 
610 	if (engs->count > 0) {
611 		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
612 						 -engs->count);
613 		if (ret)
614 			return ret;
615 	}
616 
617 	return 0;
618 }
619 
620 static int check_engines_availability(struct device *dev,
621 				      struct otx2_cpt_eng_grp_info *grp,
622 				      struct otx2_cpt_engines *req_eng)
623 {
624 	int avail_cnt = 0;
625 
626 	switch (req_eng->type) {
627 	case OTX2_CPT_SE_TYPES:
628 		avail_cnt = grp->g->avail.se_cnt;
629 		break;
630 
631 	case OTX2_CPT_IE_TYPES:
632 		avail_cnt = grp->g->avail.ie_cnt;
633 		break;
634 
635 	case OTX2_CPT_AE_TYPES:
636 		avail_cnt = grp->g->avail.ae_cnt;
637 		break;
638 
639 	default:
640 		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
641 		return -EINVAL;
642 	}
643 
644 	if (avail_cnt < req_eng->count) {
645 		dev_err(dev,
646 			"Error: available %s engines %d < requested %d\n",
647 			get_eng_type_str(req_eng->type),
648 			avail_cnt, req_eng->count);
649 		return -EBUSY;
650 	}
651 	return 0;
652 }
653 
654 static int reserve_engines(struct device *dev,
655 			   struct otx2_cpt_eng_grp_info *grp,
656 			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
657 {
658 	int i, ret = 0;
659 
660 	/* Validate that the requested number of engines is available */
661 	for (i = 0; i < ucodes_cnt; i++) {
662 		ret = check_engines_availability(dev, grp, &req_engs[i]);
663 		if (ret)
664 			return ret;
665 	}
666 
667 	/* Reserve requested engines for this engine group */
668 	for (i = 0; i < ucodes_cnt; i++) {
669 		ret = do_reserve_engines(dev, grp, &req_engs[i]);
670 		if (ret)
671 			return ret;
672 	}
673 	return 0;
674 }
675 
676 static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
677 {
678 	if (ucode->va) {
679 		dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
680 				  ucode->dma);
681 		ucode->va = NULL;
682 		ucode->dma = 0;
683 		ucode->size = 0;
684 	}
685 
686 	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
687 	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
688 	set_ucode_filename(ucode, "");
689 	ucode->type = 0;
690 }
691 
692 static int copy_ucode_to_dma_mem(struct device *dev,
693 				 struct otx2_cpt_ucode *ucode,
694 				 const u8 *ucode_data)
695 {
696 	u32 i;
697 
698 	/* Allocate DMAable space */
699 	ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
700 				       GFP_KERNEL);
701 	if (!ucode->va)
702 		return -ENOMEM;
703 
704 	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
705 	       ucode->size);
706 
707 	/* Byte-swap each 64-bit word */
708 	for (i = 0; i < (ucode->size / 8); i++)
709 		cpu_to_be64s(&((u64 *)ucode->va)[i]);
710 	/* Ucode needs 16-bit swap */
711 	for (i = 0; i < (ucode->size / 2); i++)
712 		cpu_to_be16s(&((u16 *)ucode->va)[i]);
713 	return 0;
714 }
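/*
 * Net effect of the two swaps above on a little-endian host, sketched on
 * one 64-bit word with bytes b0..b7 in memory order:
 *
 *   b0 b1 b2 b3 b4 b5 b6 b7     original image bytes
 *   b7 b6 b5 b4 b3 b2 b1 b0     after cpu_to_be64s()
 *   b6 b7 b4 b5 b2 b3 b0 b1     after cpu_to_be16s() on each half-word
 *
 * i.e. the 16-bit words of each 64-bit word end up in reverse order while
 * keeping their original byte order; on big-endian hosts both helpers are
 * no-ops.
 */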
715 
716 static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
717 			  void *obj)
718 {
719 	int ret;
720 
721 	/* Point microcode to each core of the group */
722 	ret = cpt_set_ucode_base(eng_grp, obj);
723 	if (ret)
724 		return ret;
725 
726 	/* Attach the cores to the group and enable them */
727 	ret = cpt_attach_and_enable_cores(eng_grp, obj);
728 
729 	return ret;
730 }
731 
732 static int disable_eng_grp(struct device *dev,
733 			   struct otx2_cpt_eng_grp_info *eng_grp,
734 			   void *obj)
735 {
736 	int i, ret;
737 
738 	/* Disable all engines used by this group */
739 	ret = cpt_detach_and_disable_cores(eng_grp, obj);
740 	if (ret)
741 		return ret;
742 
743 	/* Unload ucode used by this engine group */
744 	ucode_unload(dev, &eng_grp->ucode[0]);
745 	ucode_unload(dev, &eng_grp->ucode[1]);
746 
747 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
748 		if (!eng_grp->engs[i].type)
749 			continue;
750 
751 		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
752 	}
753 
754 	/* Clear UCODE_BASE register for each engine used by this group */
755 	ret = cpt_set_ucode_base(eng_grp, obj);
756 
757 	return ret;
758 }
759 
760 static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
761 				    struct otx2_cpt_eng_grp_info *src_grp)
762 {
763 	/* Set up fields for the engine group which is mirrored */
764 	src_grp->mirror.is_ena = false;
765 	src_grp->mirror.idx = 0;
766 	src_grp->mirror.ref_count++;
767 
768 	/* Set up fields for the mirroring engine group */
769 	dst_grp->mirror.is_ena = true;
770 	dst_grp->mirror.idx = src_grp->idx;
771 	dst_grp->mirror.ref_count = 0;
772 }
773 
774 static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
775 {
776 	struct otx2_cpt_eng_grp_info *src_grp;
777 
778 	if (!dst_grp->mirror.is_ena)
779 		return;
780 
781 	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
782 
783 	src_grp->mirror.ref_count--;
784 	dst_grp->mirror.is_ena = false;
785 	dst_grp->mirror.idx = 0;
786 	dst_grp->mirror.ref_count = 0;
787 }
788 
789 static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
790 				  struct otx2_cpt_engines *engs, int engs_cnt)
791 {
792 	struct otx2_cpt_engs_rsvd *mirrored_engs;
793 	int i;
794 
795 	for (i = 0; i < engs_cnt; i++) {
796 		mirrored_engs = find_engines_by_type(mirror_eng_grp,
797 						     engs[i].type);
798 		if (!mirrored_engs)
799 			continue;
800 
801 		/*
802 		 * If mirrored group has this type of engines attached then
803 		 * there are 3 scenarios possible:
804 		 * 1) mirrored_engs.count == engs[i].count then all engines
805 		 * from mirrored engine group will be shared with this engine
806 		 * group
807 		 * 2) mirrored_engs.count > engs[i].count then only a subset of
808 		 * engines from mirrored engine group will be shared with this
809 		 * engine group
810 		 * 3) mirrored_engs.count < engs[i].count then all engines
811 		 * from mirrored engine group will be shared with this group
812 		 * and additional engines will be reserved for exclusive use
813 		 * by this engine group
814 		 */
815 		engs[i].count -= mirrored_engs->count;
816 	}
817 }
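/*
 * Worked example for scenario 3 above (illustrative numbers): if the
 * mirrored group holds 10 SE engines and this group requests 15, the
 * subtraction leaves engs[i].count == 5, so five extra SE engines are
 * reserved exclusively while ten are shared. A negative result
 * (scenario 2) is handled later in eng_grp_update_masks().
 */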
818 
819 static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
820 					struct otx2_cpt_eng_grp_info *grp)
821 {
822 	struct otx2_cpt_eng_grps *eng_grps = grp->g;
823 	int i;
824 
825 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
826 		if (!eng_grps->grp[i].is_enabled)
827 			continue;
828 		if (eng_grps->grp[i].ucode[0].type &&
829 		    eng_grps->grp[i].ucode[1].type)
830 			continue;
831 		if (grp->idx == i)
832 			continue;
833 		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
834 				 grp->ucode[0].ver_str,
835 				 OTX2_CPT_UCODE_VER_STR_SZ))
836 			return &eng_grps->grp[i];
837 	}
838 
839 	return NULL;
840 }
841 
842 static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
843 					struct otx2_cpt_eng_grps *eng_grps)
844 {
845 	int i;
846 
847 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
848 		if (!eng_grps->grp[i].is_enabled)
849 			return &eng_grps->grp[i];
850 	}
851 	return NULL;
852 }
853 
854 static int eng_grp_update_masks(struct device *dev,
855 				struct otx2_cpt_eng_grp_info *eng_grp)
856 {
857 	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
858 	struct otx2_cpt_bitmap tmp_bmap = { {0} };
859 	int i, j, cnt, max_cnt;
860 	int bit;
861 
862 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
863 		engs = &eng_grp->engs[i];
864 		if (!engs->type)
865 			continue;
866 		if (engs->count <= 0)
867 			continue;
868 
869 		switch (engs->type) {
870 		case OTX2_CPT_SE_TYPES:
871 			max_cnt = eng_grp->g->avail.max_se_cnt;
872 			break;
873 
874 		case OTX2_CPT_IE_TYPES:
875 			max_cnt = eng_grp->g->avail.max_ie_cnt;
876 			break;
877 
878 		case OTX2_CPT_AE_TYPES:
879 			max_cnt = eng_grp->g->avail.max_ae_cnt;
880 			break;
881 
882 		default:
883 			dev_err(dev, "Invalid engine type %d\n", engs->type);
884 			return -EINVAL;
885 		}
886 
887 		cnt = engs->count;
888 		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
889 		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
890 		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
891 			if (!eng_grp->g->eng_ref_cnt[j]) {
892 				bitmap_set(tmp_bmap.bits, j, 1);
893 				cnt--;
894 				if (!cnt)
895 					break;
896 			}
897 		}
898 
899 		if (cnt)
900 			return -ENOSPC;
901 
902 		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
903 	}
904 
905 	if (!eng_grp->mirror.is_ena)
906 		return 0;
907 
908 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
909 		engs = &eng_grp->engs[i];
910 		if (!engs->type)
911 			continue;
912 
913 		mirrored_engs = find_engines_by_type(
914 					&eng_grp->g->grp[eng_grp->mirror.idx],
915 					engs->type);
916 		WARN_ON(!mirrored_engs && engs->count <= 0);
917 		if (!mirrored_engs)
918 			continue;
919 
920 		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
921 			    eng_grp->g->engs_num);
922 		if (engs->count < 0) {
923 			bit = find_first_bit(mirrored_engs->bmap,
924 					     eng_grp->g->engs_num);
925 			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
926 		}
927 		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
928 			  eng_grp->g->engs_num);
929 	}
930 	return 0;
931 }
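/*
 * Sketch of the mirror adjustment above, with a hypothetical bitmap: if
 * the mirrored group owns engines {0,1,2,3} and engs->count == -2, the
 * first set bit (0) is found and two bits {0,1} are cleared from the
 * temporary copy, so only {2,3} are OR-ed into this group's mask, i.e.
 * two fewer engines are shared.
 */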
932 
933 static int delete_engine_group(struct device *dev,
934 			       struct otx2_cpt_eng_grp_info *eng_grp)
935 {
936 	int ret;
937 
938 	if (!eng_grp->is_enabled)
939 		return 0;
940 
941 	if (eng_grp->mirror.ref_count)
942 		return -EINVAL;
943 
944 	/* Remove engine group mirroring if enabled */
945 	remove_eng_grp_mirroring(eng_grp);
946 
947 	/* Disable engine group */
948 	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
949 	if (ret)
950 		return ret;
951 
952 	/* Release all engines held by this engine group */
953 	ret = release_engines(dev, eng_grp);
954 	if (ret)
955 		return ret;
956 
957 	eng_grp->is_enabled = false;
958 
959 	return 0;
960 }
961 
962 static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
963 {
964 	struct otx2_cpt_ucode *ucode;
965 
966 	if (eng_grp->mirror.is_ena)
967 		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
968 	else
969 		ucode = &eng_grp->ucode[0];
970 	WARN_ON(!eng_grp->engs[0].type);
971 	eng_grp->engs[0].ucode = ucode;
972 
973 	if (eng_grp->engs[1].type) {
974 		if (is_2nd_ucode_used(eng_grp))
975 			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
976 		else
977 			eng_grp->engs[1].ucode = ucode;
978 	}
979 }
980 
981 static int create_engine_group(struct device *dev,
982 			       struct otx2_cpt_eng_grps *eng_grps,
983 			       struct otx2_cpt_engines *engs, int ucodes_cnt,
984 			       void *ucode_data[], int is_print)
985 {
986 	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
987 	struct otx2_cpt_eng_grp_info *eng_grp;
988 	struct otx2_cpt_uc_info_t *uc_info;
989 	int i, ret = 0;
990 
991 	/* Find engine group which is not used */
992 	eng_grp = find_unused_eng_grp(eng_grps);
993 	if (!eng_grp) {
994 		dev_err(dev, "Error: all engine groups are in use\n");
995 		return -ENOSPC;
996 	}
997 	/* Load ucode */
998 	for (i = 0; i < ucodes_cnt; i++) {
999 		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
1000 		eng_grp->ucode[i] = uc_info->ucode;
1001 		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
1002 					    uc_info->fw->data);
1003 		if (ret)
1004 			goto unload_ucode;
1005 	}
1006 
1007 	/* Check if this group mirrors another existing engine group */
1008 	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
1009 	if (mirrored_eng_grp) {
1010 		/* Setup mirroring */
1011 		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
1012 
1013 		/*
1014 		 * Update count of requested engines because some
1015 		 * of them might be shared with mirrored group
1016 		 */
1017 		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
1018 	}
1019 	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
1020 	if (ret)
1021 		goto unload_ucode;
1022 
1023 	/* Update ucode pointers used by engines */
1024 	update_ucode_ptrs(eng_grp);
1025 
1026 	/* Update engine masks used by this group */
1027 	ret = eng_grp_update_masks(dev, eng_grp);
1028 	if (ret)
1029 		goto release_engs;
1030 
1031 	/* Enable engine group */
1032 	ret = enable_eng_grp(eng_grp, eng_grps->obj);
1033 	if (ret)
1034 		goto release_engs;
1035 
1036 	/*
1037 	 * If this engine group mirrors another engine group
1038 	 * then we need to unload ucode as we will use ucode
1039 	 * from mirrored engine group
1040 	 */
1041 	if (eng_grp->mirror.is_ena)
1042 		ucode_unload(dev, &eng_grp->ucode[0]);
1043 
1044 	eng_grp->is_enabled = true;
1045 
1046 	if (!is_print)
1047 		return 0;
1048 
1049 	if (mirrored_eng_grp)
1050 		dev_info(dev,
1051 			 "Engine_group%d: reuse microcode %s from group %d\n",
1052 			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
1053 			 mirrored_eng_grp->idx);
1054 	else
1055 		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
1056 			 eng_grp->idx, eng_grp->ucode[0].ver_str);
1057 	if (is_2nd_ucode_used(eng_grp))
1058 		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
1059 			 eng_grp->idx, eng_grp->ucode[1].ver_str);
1060 
1061 	return 0;
1062 
1063 release_engs:
1064 	release_engines(dev, eng_grp);
1065 unload_ucode:
1066 	ucode_unload(dev, &eng_grp->ucode[0]);
1067 	ucode_unload(dev, &eng_grp->ucode[1]);
1068 	return ret;
1069 }
1070 
1071 static void delete_engine_grps(struct pci_dev *pdev,
1072 			       struct otx2_cpt_eng_grps *eng_grps)
1073 {
1074 	int i;
1075 
1076 	/* First delete all mirroring engine groups */
1077 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1078 		if (eng_grps->grp[i].mirror.is_ena)
1079 			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1080 
1081 	/* Delete remaining engine groups */
1082 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1083 		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1084 }
1085 
1086 #define PCI_DEVID_CN10K_RNM 0xA098
1087 #define RNM_ENTROPY_STATUS  0x8
1088 
1089 static void rnm_to_cpt_errata_fixup(struct device *dev)
1090 {
1091 	struct pci_dev *pdev;
1092 	void __iomem *base;
1093 	int timeout = 5000;
1094 
1095 	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
1096 	if (!pdev)
1097 		return;
1098 
1099 	base = pci_ioremap_bar(pdev, 0);
1100 	if (!base)
1101 		goto put_pdev;
1102 
1103 	while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
1104 		cpu_relax();
1105 		udelay(1);
1106 		timeout--;
1107 		if (!timeout) {
1108 			dev_warn(dev, "RNM is not producing entropy\n");
1109 			break;
1110 		}
1111 	}
1112 
1113 	iounmap(base);
1114 
1115 put_pdev:
1116 	pci_dev_put(pdev);
1117 }
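/*
 * Note on the poll above: bits [6:0] of RNM_ENTROPY_STATUS are compared
 * against 0x40, so the loop waits (up to ~5 ms with udelay(1)) for the
 * normal-entropy counter to reach exactly 0x40, matching the
 * RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 condition described in
 * otx2_cpt_create_eng_grps().
 */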
1118 
1119 int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
1120 {
1121 
1122 	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
1123 	struct otx2_cpt_eng_grp_info *grp;
1124 	int i;
1125 
1126 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1127 		grp = &eng_grps->grp[i];
1128 		if (!grp->is_enabled)
1129 			continue;
1130 
1131 		if (eng_type == OTX2_CPT_SE_TYPES) {
1132 			if (eng_grp_has_eng_type(grp, eng_type) &&
1133 			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
1134 				eng_grp_num = i;
1135 				break;
1136 			}
1137 		} else {
1138 			if (eng_grp_has_eng_type(grp, eng_type)) {
1139 				eng_grp_num = i;
1140 				break;
1141 			}
1142 		}
1143 	}
1144 	return eng_grp_num;
1145 }
1146 
1147 int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
1148 			     struct otx2_cpt_eng_grps *eng_grps)
1149 {
1150 	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {  };
1151 	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1152 	struct pci_dev *pdev = cptpf->pdev;
1153 	struct fw_info_t fw_info;
1154 	u64 reg_val;
1155 	int ret = 0;
1156 
1157 	mutex_lock(&eng_grps->lock);
1158 	/*
1159 	 * We don't create engine groups if they were already
1160 	 * created (when the user enabled VFs for the first time)
1161 	 */
1162 	if (eng_grps->is_grps_created)
1163 		goto unlock;
1164 
1165 	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
1166 	if (ret)
1167 		goto unlock;
1168 
1169 	/*
1170 	 * Create engine group with SE engines for kernel
1171 	 * crypto functionality (symmetric crypto)
1172 	 */
1173 	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1174 	if (uc_info[0] == NULL) {
1175 		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
1176 		ret = -EINVAL;
1177 		goto release_fw;
1178 	}
1179 	engs[0].type = OTX2_CPT_SE_TYPES;
1180 	engs[0].count = eng_grps->avail.max_se_cnt;
1181 
1182 	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1183 				  (void **) uc_info, 1);
1184 	if (ret)
1185 		goto release_fw;
1186 
1187 	/*
1188 	 * Create engine group with SE+IE engines for IPSec.
1189 	 * All SE engines will be shared with engine group 0.
1190 	 */
1191 	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1192 	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
1193 
1194 	if (uc_info[1] == NULL) {
1195 		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
1196 		ret = -EINVAL;
1197 		goto delete_eng_grp;
1198 	}
1199 	engs[0].type = OTX2_CPT_SE_TYPES;
1200 	engs[0].count = eng_grps->avail.max_se_cnt;
1201 	engs[1].type = OTX2_CPT_IE_TYPES;
1202 	engs[1].count = eng_grps->avail.max_ie_cnt;
1203 
1204 	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
1205 				  (void **) uc_info, 1);
1206 	if (ret)
1207 		goto delete_eng_grp;
1208 
1209 	/*
1210 	 * Create engine group with AE engines for asymmetric
1211 	 * crypto functionality.
1212 	 */
1213 	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
1214 	if (uc_info[0] == NULL) {
1215 		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
1216 		ret = -EINVAL;
1217 		goto delete_eng_grp;
1218 	}
1219 	engs[0].type = OTX2_CPT_AE_TYPES;
1220 	engs[0].count = eng_grps->avail.max_ae_cnt;
1221 
1222 	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1223 				  (void **) uc_info, 1);
1224 	if (ret)
1225 		goto delete_eng_grp;
1226 
1227 	eng_grps->is_grps_created = true;
1228 
1229 	cpt_ucode_release_fw(&fw_info);
1230 
1231 	if (is_dev_otx2(pdev))
1232 		goto unlock;
1233 
1234 	/*
1235 	 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
1236 	 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
1237 	 */
1238 	rnm_to_cpt_errata_fixup(&pdev->dev);
1239 
1240 	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
1241 			     BLKADDR_CPT0);
1242 	/*
1243 	 * Configure engine group mask to allow context prefetching
1244 	 * for the groups and enable random number request, to enable
1245 	 * CPT to request random numbers from RNM.
1246 	 */
1247 	reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
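	/*
	 * Bit-position sketch for the line above (assuming
	 * OTX2_CPT_ALL_ENG_GRPS_MASK covers all engine groups): "<< 3"
	 * places the prefetch group mask at bits starting from bit 3, and
	 * BIT_ULL(16) is the random number request enable mentioned in the
	 * comment above.
	 */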
1248 	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
1249 			      reg_val, BLKADDR_CPT0);
1250 	/*
1251 	 * Set interval to periodically flush dirty data for the next
1252 	 * CTX cache entry. Set the interval count to maximum supported
1253 	 * value.
1254 	 */
1255 	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
1256 			      CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
1257 
1258 	/*
1259 	 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW erratum: when
1260 	 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
1261 	 * encounters a fault/poison, a rare case may result in
1262 	 * unpredictable data being delivered to a CPT engine.
1263 	 */
1264 	if (cpt_is_errata_38550_exists(pdev)) {
1265 		otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
1266 				     &reg_val, BLKADDR_CPT0);
1267 		otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
1268 				      reg_val | BIT_ULL(24), BLKADDR_CPT0);
1269 	}
1270 
1271 	mutex_unlock(&eng_grps->lock);
1272 	return 0;
1273 
1274 delete_eng_grp:
1275 	delete_engine_grps(pdev, eng_grps);
1276 release_fw:
1277 	cpt_ucode_release_fw(&fw_info);
1278 unlock:
1279 	mutex_unlock(&eng_grps->lock);
1280 	return ret;
1281 }
1282 
1283 static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
1284 				  int blkaddr)
1285 {
1286 	int timeout = 10, ret;
1287 	int i, busy;
1288 	u64 reg;
1289 
1290 	/* Disengage the cores from groups */
1291 	for (i = 0; i < total_cores; i++) {
1292 		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1293 						CPT_AF_EXEX_CTL2(i), 0x0,
1294 						blkaddr);
1295 		if (ret)
1296 			return ret;
1297 
1298 		cptpf->eng_grps.eng_ref_cnt[i] = 0;
1299 	}
1300 	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
1301 	if (ret)
1302 		return ret;
1303 
1304 	/* Wait for cores to become idle */
1305 	do {
1306 		busy = 0;
1307 		usleep_range(10000, 20000);
1308 		if (timeout-- < 0)
1309 			return -EBUSY;
1310 
1311 		for (i = 0; i < total_cores; i++) {
1312 			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
1313 						   cptpf->pdev,
1314 						   CPT_AF_EXEX_STS(i), &reg,
1315 						   blkaddr);
1316 			if (ret)
1317 				return ret;
1318 
1319 			if (reg & 0x1) {
1320 				busy = 1;
1321 				break;
1322 			}
1323 		}
1324 	} while (busy);
1325 
1326 	/* Disable the cores */
1327 	for (i = 0; i < total_cores; i++) {
1328 		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1329 						CPT_AF_EXEX_CTL(i), 0x0,
1330 						blkaddr);
1331 		if (ret)
1332 			return ret;
1333 	}
1334 	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
1335 }
1336 
1337 int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
1338 {
1339 	int total_cores, ret;
1340 
1341 	total_cores = cptpf->eng_grps.avail.max_se_cnt +
1342 		      cptpf->eng_grps.avail.max_ie_cnt +
1343 		      cptpf->eng_grps.avail.max_ae_cnt;
1344 
1345 	if (cptpf->has_cpt1) {
1346 		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
1347 		if (ret)
1348 			return ret;
1349 	}
1350 	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
1351 }
1352 
1353 void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1354 			       struct otx2_cpt_eng_grps *eng_grps)
1355 {
1356 	struct otx2_cpt_eng_grp_info *grp;
1357 	int i, j;
1358 
1359 	mutex_lock(&eng_grps->lock);
1360 	delete_engine_grps(pdev, eng_grps);
1361 	/* Release memory */
1362 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1363 		grp = &eng_grps->grp[i];
1364 		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1365 			kfree(grp->engs[j].bmap);
1366 			grp->engs[j].bmap = NULL;
1367 		}
1368 	}
1369 	mutex_unlock(&eng_grps->lock);
1370 }
1371 
1372 int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
1373 			   struct otx2_cpt_eng_grps *eng_grps)
1374 {
1375 	struct otx2_cpt_eng_grp_info *grp;
1376 	int i, j, ret;
1377 
1378 	mutex_init(&eng_grps->lock);
1379 	eng_grps->obj = pci_get_drvdata(pdev);
1380 	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
1381 	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
1382 	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
1383 
1384 	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
1385 			     eng_grps->avail.max_ie_cnt +
1386 			     eng_grps->avail.max_ae_cnt;
1387 	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
1388 		dev_err(&pdev->dev,
1389 			"Number of engines %d > max supported %d\n",
1390 			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
1391 		ret = -EINVAL;
1392 		goto cleanup_eng_grps;
1393 	}
1394 
1395 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1396 		grp = &eng_grps->grp[i];
1397 		grp->g = eng_grps;
1398 		grp->idx = i;
1399 
1400 		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1401 			grp->engs[j].bmap =
1402 				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
1403 					sizeof(long), GFP_KERNEL);
1404 			if (!grp->engs[j].bmap) {
1405 				ret = -ENOMEM;
1406 				goto cleanup_eng_grps;
1407 			}
1408 		}
1409 	}
1410 	return 0;
1411 
1412 cleanup_eng_grps:
1413 	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
1414 	return ret;
1415 }
1416 
1417 static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
1418 					  struct otx2_cpt_eng_grps *eng_grps)
1419 {
1420 	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {  };
1421 	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1422 	struct fw_info_t fw_info;
1423 	int ret;
1424 
1425 	mutex_lock(&eng_grps->lock);
1426 	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
1427 	if (ret) {
1428 		mutex_unlock(&eng_grps->lock);
1429 		return ret;
1430 	}
1431 
1432 	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
1433 	if (uc_info[0] == NULL) {
1434 		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
1435 		ret = -EINVAL;
1436 		goto release_fw;
1437 	}
1438 	engs[0].type = OTX2_CPT_AE_TYPES;
1439 	engs[0].count = 2;
1440 
1441 	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1442 				  (void **) uc_info, 0);
1443 	if (ret)
1444 		goto release_fw;
1445 
1446 	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1447 	if (uc_info[0] == NULL) {
1448 		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
1449 		ret = -EINVAL;
1450 		goto delete_eng_grp;
1451 	}
1452 	engs[0].type = OTX2_CPT_SE_TYPES;
1453 	engs[0].count = 2;
1454 
1455 	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1456 				  (void **) uc_info, 0);
1457 	if (ret)
1458 		goto delete_eng_grp;
1459 
1460 	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
1461 	if (uc_info[0] == NULL) {
1462 		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
1463 		ret = -EINVAL;
1464 		goto delete_eng_grp;
1465 	}
1466 	engs[0].type = OTX2_CPT_IE_TYPES;
1467 	engs[0].count = 2;
1468 
1469 	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1470 				  (void **) uc_info, 0);
1471 	if (ret)
1472 		goto delete_eng_grp;
1473 
1474 	cpt_ucode_release_fw(&fw_info);
1475 	mutex_unlock(&eng_grps->lock);
1476 	return 0;
1477 
1478 delete_eng_grp:
1479 	delete_engine_grps(pdev, eng_grps);
1480 release_fw:
1481 	cpt_ucode_release_fw(&fw_info);
1482 	mutex_unlock(&eng_grps->lock);
1483 	return ret;
1484 }
1485 
1486 /*
1487  * Get CPT HW capabilities using LOAD_FVC operation.
1488  */
1489 int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
1490 {
1491 	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
1492 	struct otx2_cpt_iq_command iq_cmd;
1493 	union otx2_cpt_opcode opcode;
1494 	union otx2_cpt_res_s *result;
1495 	union otx2_cpt_inst_s inst;
1496 	dma_addr_t rptr_baddr;
1497 	struct pci_dev *pdev;
1498 	u32 len, compl_rlen;
1499 	int ret, etype;
1500 	void *rptr;
1501 
1502 	/*
1503 	 * We don't rediscover capabilities if it was already done
1504 	 * (when the user enabled VFs for the first time)
1505 	 */
1506 	if (cptpf->is_eng_caps_discovered)
1507 		return 0;
1508 
1509 	pdev = cptpf->pdev;
1510 	/*
1511 	 * Create engine groups for each type to submit LOAD_FVC op and
1512 	 * get engine's capabilities.
1513 	 */
1514 	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
1515 	if (ret)
1516 		goto delete_grps;
1517 
1518 	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
1519 			      OTX2_CPT_QUEUE_HI_PRIO, 1);
1520 	if (ret)
1521 		goto delete_grps;
1522 
1523 	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
1524 	len = compl_rlen + LOADFVC_RLEN;
1525 
1526 	result = kzalloc(len, GFP_KERNEL);
1527 	if (!result) {
1528 		ret = -ENOMEM;
1529 		goto lf_cleanup;
1530 	}
1531 	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
1532 				    DMA_BIDIRECTIONAL);
1533 	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
1534 		dev_err(&pdev->dev, "DMA mapping failed\n");
1535 		ret = -EFAULT;
1536 		goto free_result;
1537 	}
1538 	rptr = (u8 *)result + compl_rlen;
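	/*
	 * Resulting layout of the single DMA mapping (sketch):
	 *   [0, compl_rlen)               union otx2_cpt_res_s completion
	 *   [compl_rlen, compl_rlen + 8)  LOADFVC response (LOADFVC_RLEN)
	 * so iq_cmd.rptr below points at the response area, past the
	 * DMA-aligned completion record.
	 */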
1539 
1540 	/* Fill in the command */
1541 	opcode.s.major = LOADFVC_MAJOR_OP;
1542 	opcode.s.minor = LOADFVC_MINOR_OP;
1543 
1544 	iq_cmd.cmd.u = 0;
1545 	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
1546 
1547 	/* 64-bit swap for microcode data reads, not needed for addresses */
1548 	cpu_to_be64s(&iq_cmd.cmd.u);
1549 	iq_cmd.dptr = 0;
1550 	iq_cmd.rptr = rptr_baddr + compl_rlen;
1551 	iq_cmd.cptr.u = 0;
1552 
1553 	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
1554 		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
1555 		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
1556 							 etype);
1557 		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
1558 		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
1559 
1560 		while (lfs->ops->cpt_get_compcode(result) ==
1561 						OTX2_CPT_COMPLETION_CODE_INIT)
1562 			cpu_relax();
1563 
1564 		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
1565 	}
1566 	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
1567 	cptpf->is_eng_caps_discovered = true;
1568 
1569 free_result:
1570 	kfree(result);
1571 lf_cleanup:
1572 	otx2_cptlf_shutdown(lfs);
1573 delete_grps:
1574 	delete_engine_grps(pdev, &cptpf->eng_grps);
1575 
1576 	return ret;
1577 }
1578 
1579 int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
1580 				   struct devlink_param_gset_ctx *ctx)
1581 {
1582 	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
1583 	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
1584 	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1585 	char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
1586 	char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
1587 	struct device *dev = &cptpf->pdev->dev;
1588 	char *start, *val, *err_msg, *tmp;
1589 	int grp_idx = 0, ret = -EINVAL;
1590 	bool has_se, has_ie, has_ae;
1591 	struct fw_info_t fw_info;
1592 	int ucode_idx = 0;
1593 
1594 	if (!eng_grps->is_grps_created) {
1595 		dev_err(dev, "Not allowed before creating the default groups\n");
1596 		return -EINVAL;
1597 	}
1598 	err_msg = "Invalid engine group format";
1599 	strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
1600 	start = tmp_buf;
1601 
1602 	has_se = has_ie = has_ae = false;
1603 
1604 	for (;;) {
1605 		val = strsep(&start, ";");
1606 		if (!val)
1607 			break;
1608 		val = strim(val);
1609 		if (!*val)
1610 			continue;
1611 
1612 		if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
1613 			if (has_se || ucode_idx)
1614 				goto err_print;
1615 			tmp = strsep(&val, ":");
1616 			if (!tmp)
1617 				goto err_print;
1618 			tmp = strim(tmp);
1619 			if (!val)
1620 				goto err_print;
1621 			if (strlen(tmp) != 2)
1622 				goto err_print;
1623 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1624 				goto err_print;
1625 			engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
1626 			has_se = true;
1627 		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
1628 			if (has_ae || ucode_idx)
1629 				goto err_print;
1630 			tmp = strsep(&val, ":");
1631 			if (!tmp)
1632 				goto err_print;
1633 			tmp = strim(tmp);
1634 			if (!val)
1635 				goto err_print;
1636 			if (strlen(tmp) != 2)
1637 				goto err_print;
1638 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1639 				goto err_print;
1640 			engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
1641 			has_ae = true;
1642 		} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
1643 			if (has_ie || ucode_idx)
1644 				goto err_print;
1645 			tmp = strsep(&val, ":");
1646 			if (!tmp)
1647 				goto err_print;
1648 			tmp = strim(tmp);
1649 			if (!val)
1650 				goto err_print;
1651 			if (strlen(tmp) != 2)
1652 				goto err_print;
1653 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1654 				goto err_print;
1655 			engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
1656 			has_ie = true;
1657 		} else {
1658 			if (ucode_idx > 1)
1659 				goto err_print;
1660 			if (!strlen(val))
1661 				goto err_print;
1662 			if (strnstr(val, " ", strlen(val)))
1663 				goto err_print;
1664 			ucode_filename[ucode_idx++] = val;
1665 		}
1666 	}
1667 
1668 	/* Validate input parameters */
1669 	if (!(grp_idx && ucode_idx))
1670 		goto err_print;
1671 
1672 	if (ucode_idx > 1 && grp_idx < 2)
1673 		goto err_print;
1674 
1675 	if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
1676 		err_msg = "Error max 2 engine types can be attached";
1677 		goto err_print;
1678 	}
1679 
1680 	if (grp_idx > 1) {
1681 		if ((engs[0].type + engs[1].type) !=
1682 		    (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
1683 			err_msg = "Only combination of SE+IE engines is allowed";
1684 			goto err_print;
1685 		}
1686 		/* Keep SE engines at zero index */
1687 		if (engs[1].type == OTX2_CPT_SE_TYPES)
1688 			swap(engs[0], engs[1]);
1689 	}
1690 	mutex_lock(&eng_grps->lock);
1691 
1692 	if (cptpf->enabled_vfs) {
1693 		dev_err(dev, "Disable VFs before modifying engine groups\n");
1694 		ret = -EACCES;
1695 		goto err_unlock;
1696 	}
1697 	INIT_LIST_HEAD(&fw_info.ucodes);
1698 
1699 	ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
1700 	if (ret) {
1701 		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
1702 		goto err_unlock;
1703 	}
1704 	if (ucode_idx > 1) {
1705 		ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
1706 		if (ret) {
1707 			dev_err(dev, "Unable to load firmware %s\n",
1708 				ucode_filename[1]);
1709 			goto release_fw;
1710 		}
1711 	}
1712 	uc_info[0] = get_ucode(&fw_info, engs[0].type);
1713 	if (uc_info[0] == NULL) {
1714 		dev_err(dev, "Unable to find firmware for %s\n",
1715 			get_eng_type_str(engs[0].type));
1716 		ret = -EINVAL;
1717 		goto release_fw;
1718 	}
1719 	if (ucode_idx > 1) {
1720 		uc_info[1] = get_ucode(&fw_info, engs[1].type);
1721 		if (uc_info[1] == NULL) {
1722 			dev_err(dev, "Unable to find firmware for %s\n",
1723 				get_eng_type_str(engs[1].type));
1724 			ret = -EINVAL;
1725 			goto release_fw;
1726 		}
1727 	}
1728 	ret = create_engine_group(dev, eng_grps, engs, grp_idx,
1729 				  (void **)uc_info, 1);
1730 
1731 release_fw:
1732 	cpt_ucode_release_fw(&fw_info);
1733 err_unlock:
1734 	mutex_unlock(&eng_grps->lock);
1735 	return ret;
1736 err_print:
1737 	dev_err(dev, "%s\n", err_msg);
1738 	return ret;
1739 }
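/*
 * Illustrative input for the parser above (the file names are
 * hypothetical): "se:32;ie:16;se-ucode.out;ie-ucode.out", i.e. one or two
 * "<type>:<count>" tokens followed by one or two microcode file names,
 * separated by ';'. Engine counts must precede the file names, SE+IE is
 * the only two-type combination accepted, and SE is moved to index 0
 * before the group is created.
 */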
1740 
1741 int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
1742 				   struct devlink_param_gset_ctx *ctx)
1743 {
1744 	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1745 	struct device *dev = &cptpf->pdev->dev;
1746 	char *tmp, *err_msg;
1747 	int egrp;
1748 	int ret;
1749 
1750 	err_msg = "Invalid input string format (ex: egrp:0)";
1751 	if (strncasecmp(ctx->val.vstr, "egrp", 4))
1752 		goto err_print;
1753 	tmp = ctx->val.vstr;
1754 	strsep(&tmp, ":");
1755 	if (!tmp)
1756 		goto err_print;
1757 	if (kstrtoint(tmp, 10, &egrp))
1758 		goto err_print;
1759 
1760 	if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
1761 		dev_err(dev, "Invalid engine group %d\n", egrp);
1762 		return -EINVAL;
1763 	}
1764 	if (!eng_grps->grp[egrp].is_enabled) {
1765 		dev_err(dev, "Error: engine_group%d is not configured\n", egrp);
1766 		return -EINVAL;
1767 	}
1768 	mutex_lock(&eng_grps->lock);
1769 	ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
1770 	mutex_unlock(&eng_grps->lock);
1771 
1772 	return ret;
1773 
1774 err_print:
1775 	dev_err(dev, "%s\n", err_msg);
1776 	return -EINVAL;
1777 }
1778