// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30

#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08

/*
 * Interval to flush dirty data for next CTX entry. The interval is measured
 * in increments of 10ns (interval time = CTX_FLUSH_TIMER_CNT * 10ns).
 */
#define CTX_FLUSH_TIMER_CNT 0x2FAF0

struct fw_info_t {
	struct list_head ucodes;
};

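/*
 * Build a bitmap of all engine cores reserved by the given engine group
 * across its engine-type slots. Returns a zero-sized bitmap on error.
 */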
static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num < 0 ||
	    eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
	if (eng_grp->ucode[1].type)
		return true;
	else
		return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
			       const char *filename)
{
	strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX2_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX2_CPT_IE_TYPES:
		str = "IE";
		break;

	case OTX2_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX2_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX2_CPT_IE_TYPES):
		str = "IE";
		break;

	case (1 << OTX2_CPT_AE_TYPES):
		str = "AE";
		break;

	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
		str = "SE+IPSEC";
		break;
	}
	return str;
}

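/*
 * Parse the microcode header's version string (expected to start with an
 * "ocpt-<rid>" prefix) and version number to work out which engine types
 * (SE, IE, AE) this image supports. The result is returned as a bitmask
 * of engine types in *ucode_type.
 */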
static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type, u16 rid)
{
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	int i, val = 0;
	u8 nn;

	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	sprintf(ver_str_prefix, "ocpt-%02d", rid);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
	     nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
	     nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX2_CPT_AE_UC_TYPE)
		val |= 1 << OTX2_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;

	return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
			      dma_addr_t dma_addr, int blkaddr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				     CPT_AF_EXEX_UCODE_BASE(eng),
				     (u64)dma_addr, blkaddr);
}

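/*
 * Program the DMA address of each engine type's microcode image into
 * CPT_AF_EXEX_UCODE_BASE for every unused core reserved by this group,
 * via the AF mailbox of the given CPT block.
 */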
static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores which are not used;
		 * the other cores should already have a valid UCODE_BASE set.
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr,
							 blkaddr);
				if (ret)
					return ret;
			}
	}
	return 0;
}

static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	int ret;

	if (cptpf->has_cpt1) {
		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}

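/*
 * Detach the group's cores from the engine group (clear the group bit in
 * CPT_AF_EXEX_CTL2), wait for the cores to go idle, then disable any core
 * that is no longer referenced by other groups.
 */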
static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	/* Detach the cores from group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
						    BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
					     BLKADDR_CPT0);
}

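/*
 * Attach the group's cores to the engine group (set the group bit in
 * CPT_AF_EXEX_CTL2) and then enable them by writing CPT_AF_EXEX_CTL.
 */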
static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
						   BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}

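/*
 * Request a microcode image from the firmware subsystem, validate its
 * header (engine type and code length) and add it to the fw_info list.
 */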
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
		   char *filename, u16 rid)
{
	struct otx2_cpt_ucode_hdr *ucode_hdr;
	struct otx2_cpt_uc_info_t *uc_info;
	int ucode_type, ucode_size;
	int ret;

	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
	if (!uc_info)
		return -ENOMEM;

	ret = request_firmware(&uc_info->fw, filename, dev);
	if (ret)
		goto free_uc_info;

	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
	ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
	if (ret)
		goto release_fw;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		ret = -EINVAL;
		goto release_fw;
	}

	set_ucode_filename(&uc_info->ucode, filename);
	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX2_CPT_UCODE_VER_STR_SZ);
	uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
	uc_info->ucode.ver_num = ucode_hdr->ver_num;
	uc_info->ucode.type = ucode_type;
	uc_info->ucode.size = ucode_size;
	list_add_tail(&uc_info->list, &fw_info->ucodes);

	return 0;

release_fw:
	release_firmware(uc_info->fw);
free_uc_info:
	kfree(uc_info);
	return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr, *temp;

	if (!fw_info)
		return;

	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
		list_del(&curr->list);
		release_firmware(curr->fw);
		kfree(curr);
	}
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
					    int ucode_type)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		return curr;
	}
	return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->fw->data);
	}
}

static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
			     u16 rid)
{
	char filename[OTX2_CPT_NAME_LENGTH];
	char eng_type[8] = {0};
	int ret, e, i;

	INIT_LIST_HEAD(&fw_info->ucodes);

	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
		strcpy(eng_type, get_eng_type_str(e));
		for (i = 0; i < strlen(eng_type); i++)
			eng_type[i] = tolower(eng_type[i]);

		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
			 rid, eng_type);
		/* Request firmware for each engine type */
		ret = load_fw(&pdev->dev, fw_info, filename, rid);
		if (ret)
			goto release_fw;
	}
	print_uc_info(fw_info);
	return 0;

release_fw:
	cpt_ucode_release_fw(fw_info);
	return ret;
}

struct otx2_cpt_engs_rsvd *find_engines_by_type(
					struct otx2_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
				int eng_type)
{
	struct otx2_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}

static int update_engines_avail_count(struct device *dev,
				      struct otx2_cpt_engs_available *avail,
				      struct otx2_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX2_CPT_IE_TYPES:
		avail->ie_cnt += val;
		break;

	case OTX2_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

static int update_engines_offset(struct device *dev,
				 struct otx2_cpt_engs_available *avail,
				 struct otx2_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX2_CPT_IE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

static int release_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}
	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx2_cpt_eng_grp_info *grp,
			      struct otx2_cpt_engines *req_engs)
{
	struct otx2_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx2_cpt_eng_grp_info *grp,
				      struct otx2_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error: available %s engines %d < requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}

static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
				  ucode->dma);
		ucode->va = NULL;
		ucode->dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

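/*
 * Copy the microcode body (past the header) into a DMA-coherent buffer and
 * apply the 64-bit followed by 16-bit byte swaps the CPT engines expect.
 */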
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
	return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	/* Point microcode to each core of the group */
	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	/* Attach the cores to the group and enable them */
	ret = cpt_attach_and_enable_cores(eng_grp, obj);

	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx2_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	/* Disable all engines used by this group */
	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
	struct otx2_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached,
		 * then there are 3 possible scenarios:
		 * 1) mirrored_engs.count == engs[i].count: all engines from
		 * the mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count: only a subset of
		 * engines from the mirrored engine group will be shared with
		 * this engine group
		 * 3) mirrored_engs.count < engs[i].count: all engines from
		 * the mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusive use
		 * by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx2_cpt_eng_grp_info *grp)
{
	struct otx2_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

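/*
 * Compute the per-engine-type core bitmaps for this group: pick unused
 * cores from each type's range and, if the group mirrors another one,
 * share the mirrored group's cores as well.
 */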
static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grp_info *eng_grp)
{
	int ret;

	if (!eng_grp->is_enabled)
		return 0;

	if (eng_grp->mirror.ref_count)
		return -EINVAL;

	/* Removing engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	eng_grp->is_enabled = false;

	return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;

	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
	}
}

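/*
 * Set up a free engine group: load the microcode images into DMA memory,
 * mirror an existing group with identical microcode if possible, reserve
 * engine cores, and finally enable the group.
 */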
static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

#define PCI_DEVID_CN10K_RNM 0xA098
#define RNM_ENTROPY_STATUS  0x8

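/*
 * Errata workaround: poll the RNM block until RNM_ENTROPY_STATUS reports
 * NORMAL_CNT == 0x40, i.e. the RNM is producing entropy, before CPT is
 * allowed to request random numbers from it.
 */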
static void rnm_to_cpt_errata_fixup(struct device *dev)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int timeout = 5000;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto put_pdev;

	while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout) {
			dev_warn(dev, "RNM is not producing entropy\n");
			break;
		}
	}

	iounmap(base);

put_pdev:
	pci_dev_put(pdev);
}

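/*
 * Find the engine group number serving the given engine type. For SE,
 * prefer a group that has only SE engines (i.e. no IE engines attached).
 */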
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	struct otx2_cpt_eng_grp_info *grp;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		if (!grp->is_enabled)
			continue;

		if (eng_type == OTX2_CPT_SE_TYPES) {
			if (eng_grp_has_eng_type(grp, eng_type) &&
			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
				eng_grp_num = i;
				break;
			}
		} else {
			if (eng_grp_has_eng_type(grp, eng_type)) {
				eng_grp_num = i;
				break;
			}
		}
	}
	return eng_grp_num;
}

int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct pci_dev *pdev = cptpf->pdev;
	struct fw_info_t fw_info;
	u64 reg_val;
	int ret = 0;

	mutex_lock(&eng_grps->lock);
	/*
	 * Don't create engine groups if they were already created
	 * (i.e. when the user enabled VFs for the first time).
	 */
	if (eng_grps->is_grps_created)
		goto unlock;

	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret)
		goto unlock;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);

	if (is_dev_otx2(pdev))
		goto unlock;

	/*
	 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
	 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
	 */
	rnm_to_cpt_errata_fixup(&pdev->dev);

	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
			     BLKADDR_CPT0);
	/*
	 * Configure the engine group mask to allow context prefetching
	 * for the groups and set CPT_AF_CTL[RNM_REQ_EN] so that CPT can
	 * request random numbers from RNM.
	 */
	reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
			      reg_val, BLKADDR_CPT0);
	/*
	 * Set interval to periodically flush dirty data for the next
	 * CTX cache entry. Set the interval count to maximum supported
	 * value.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
			      CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);

	/*
	 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW errata:
	 * when CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to
	 * LLC/DRAM encounters a fault/poison, a rare case may result in
	 * unpredictable data being delivered to a CPT engine.
	 */
	if (cpt_is_errata_38550_exists(pdev)) {
		otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
				     &reg_val, BLKADDR_CPT0);
		otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
				      reg_val | BIT_ULL(24), BLKADDR_CPT0);
	}

	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
}

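/*
 * Detach all cores from their engine groups, wait for them to go idle,
 * and then disable every core of the given CPT block.
 */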
static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
				  int blkaddr)
{
	int timeout = 10, ret;
	int i, busy;
	u64 reg;

	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL2(i), 0x0,
						blkaddr);
		if (ret)
			return ret;

		cptpf->eng_grps.eng_ref_cnt[i] = 0;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for (i = 0; i < total_cores; i++) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x0,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int total_cores, ret;

	total_cores = cptpf->eng_grps.avail.max_se_cnt +
		      cptpf->eng_grps.avail.max_ie_cnt +
		      cptpf->eng_grps.avail.max_ae_cnt;

	if (cptpf->has_cpt1) {
		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j;

	mutex_lock(&eng_grps->lock);
	delete_engine_grps(pdev, eng_grps);
	/* Release memory */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}
	mutex_unlock(&eng_grps->lock);
}

int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
			   struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j, ret;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d exceeds max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}

static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	mutex_lock(&eng_grps->lock);
	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret) {
		mutex_unlock(&eng_grps->lock);
		return ret;
	}

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	u32 len, compl_rlen;
	int ret, etype;
	void *rptr;

	/*
	 * Don't get the capabilities if it was already done
	 * (i.e. when the user enabled VFs for the first time).
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
				&cptpf->afpf_mbox, BLKADDR_CPT0);
	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
	len = compl_rlen + LOADFVC_RLEN;

	result = kzalloc(len, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}
	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_result;
	}
	rptr = (u8 *)result + compl_rlen;

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr + compl_rlen;
	iq_cmd.cptr.u = 0;

	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

		while (lfs->ops->cpt_get_compcode(result) ==
						OTX2_CPT_COMPLETION_CODE_INIT)
			cpu_relax();

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
	cptpf->is_eng_caps_discovered = true;

free_result:
	kfree(result);
lf_cleanup:
	otx2_cptlf_shutdown(lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}

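/*
 * Devlink handler for creating a custom engine group. The value string
 * lists "<eng_type>:<count>" pairs and up to two microcode file names,
 * all separated by ';', e.g. "se:32;ie:16;cpt-se.out;cpt-ie.out" (the
 * file names here are illustrative, not actual firmware names).
 */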
int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
	struct device *dev = &cptpf->pdev->dev;
	char *start, *val, *err_msg, *tmp;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ie, has_ae;
	struct fw_info_t fw_info;
	int ucode_idx = 0;

	if (!eng_grps->is_grps_created) {
		dev_err(dev, "Not allowed before creating the default groups\n");
		return -EINVAL;
	}
	err_msg = "Invalid engine group format";
	strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
			has_ae = true;
		} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
			if (has_ie || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
			has_ie = true;
		} else {
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (!(grp_idx && ucode_idx))
		goto err_print;

	if (ucode_idx > 1 && grp_idx < 2)
		goto err_print;

	if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
		err_msg = "Error max 2 engine types can be attached";
		goto err_print;
	}

	if (grp_idx > 1) {
		if ((engs[0].type + engs[1].type) !=
		    (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
			err_msg = "Only combination of SE+IE engines is allowed";
			goto err_print;
		}
		/* Keep SE engines at zero index */
		if (engs[1].type == OTX2_CPT_SE_TYPES)
			swap(engs[0], engs[1]);
	}
	mutex_lock(&eng_grps->lock);

	if (cptpf->enabled_vfs) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}
	INIT_LIST_HEAD(&fw_info.ucodes);

	ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
	if (ret) {
		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
		goto err_unlock;
	}
	if (ucode_idx > 1) {
		ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
		if (ret) {
			dev_err(dev, "Unable to load firmware %s\n",
				ucode_filename[1]);
			goto release_fw;
		}
	}
	uc_info[0] = get_ucode(&fw_info, engs[0].type);
	if (uc_info[0] == NULL) {
		dev_err(dev, "Unable to find firmware for %s\n",
			get_eng_type_str(engs[0].type));
		ret = -EINVAL;
		goto release_fw;
	}
	if (ucode_idx > 1) {
		uc_info[1] = get_ucode(&fw_info, engs[1].type);
		if (uc_info[1] == NULL) {
			dev_err(dev, "Unable to find firmware for %s\n",
				get_eng_type_str(engs[1].type));
			ret = -EINVAL;
			goto release_fw;
		}
	}
	ret = create_engine_group(dev, eng_grps, engs, grp_idx,
				  (void **)uc_info, 1);

release_fw:
	cpt_ucode_release_fw(&fw_info);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
err_print:
	dev_err(dev, "%s\n", err_msg);
	return ret;
}

int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	struct device *dev = &cptpf->pdev->dev;
	char *tmp, *err_msg;
	int egrp;
	int ret;

	err_msg = "Invalid input string format (ex: egrp:0)";
	if (strncasecmp(ctx->val.vstr, "egrp", 4))
		goto err_print;
	tmp = ctx->val.vstr;
	strsep(&tmp, ":");
	if (!tmp)
		goto err_print;
	if (kstrtoint(tmp, 10, &egrp))
		goto err_print;

	if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Invalid engine group %d\n", egrp);
		return -EINVAL;
	}
	if (!eng_grps->grp[egrp].is_enabled) {
		dev_err(dev, "Error engine_group%d is not configured\n", egrp);
		return -EINVAL;
	}
	mutex_lock(&eng_grps->lock);
	ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
	mutex_unlock(&eng_grps->lock);

	return ret;

err_print:
	dev_err(dev, "%s\n", err_msg);
	return -EINVAL;
}

static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
			  int size, int idx)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx2_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
				&eng_grp->g->grp[eng_grp->mirror.idx],
				engs->type);
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf + len, size - len, ", ");
		}

		len = strlen(buf);
		scnprintf(buf + len, size - len, "%d %s ",
			  mirrored_engs ? engs->count + mirrored_engs->count :
					  engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			len = strlen(buf);
			scnprintf(buf + len, size - len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ?
					  engs->count + mirrored_engs->count :
					  mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}

void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	struct otx2_cpt_eng_grp_info *mirrored_grp;
	char engs_info[2 * OTX2_CPT_NAME_LENGTH];
	struct otx2_cpt_eng_grp_info *grp;
	struct otx2_cpt_engs_rsvd *engs;
	int i, j;

	pr_debug("Engine groups global info");
	pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
		 eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
	pr_debug("free SE %d", eng_grps->avail.se_cnt);
	pr_debug("free IE %d", eng_grps->avail.ie_cnt);
	pr_debug("free AE %d", eng_grps->avail.ae_cnt);

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		pr_debug("engine_group%d, state %s", i,
			 grp->is_enabled ? "enabled" : "disabled");
		if (grp->is_enabled) {
			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
			pr_debug("Ucode0 filename %s, version %s",
				 grp->mirror.is_ena ?
					 mirrored_grp->ucode[0].filename :
					 grp->ucode[0].filename,
				 grp->mirror.is_ena ?
					 mirrored_grp->ucode[0].ver_str :
					 grp->ucode[0].ver_str);
			if (is_2nd_ucode_used(grp))
				pr_debug("Ucode1 filename %s, version %s",
					 grp->ucode[1].filename,
					 grp->ucode[1].ver_str);
		}

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			engs = &grp->engs[j];
			if (engs->type) {
				u32 mask[5] = { };

				get_engs_info(grp, engs_info,
					      2 * OTX2_CPT_NAME_LENGTH, j);
				pr_debug("Slot%d: %s", j, engs_info);
				bitmap_to_arr32(mask, engs->bmap,
						eng_grps->engs_num);
				if (is_dev_otx2(cptpf->pdev))
					pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
						 mask[3], mask[2], mask[1],
						 mask[0]);
				else
					pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
						 mask[4], mask[3], mask[2], mask[1],
						 mask[0]);
			}
		}
	}
}