1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3
4 #include <linux/ctype.h>
5 #include <linux/firmware.h>
6 #include <linux/string.h>
7 #include <linux/string_choices.h>
8 #include "otx2_cptpf_ucode.h"
9 #include "otx2_cpt_common.h"
10 #include "otx2_cptpf.h"
11 #include "otx2_cptlf.h"
12 #include "otx2_cpt_reqmgr.h"
13 #include "rvu_reg.h"
14
15 #define CSR_DELAY 30
16
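/*
 * LOAD_FVC microcode operation: result length and opcode used by
 * otx2_cpt_discover_eng_capabilities() to query engine capabilities.
 */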
17 #define LOADFVC_RLEN 8
18 #define LOADFVC_MAJOR_OP 0x01
19 #define LOADFVC_MINOR_OP 0x08
20
21 /*
22 * Interval to flush dirty data for the next CTX entry. The interval is measured
23 * in increments of 10ns (interval time = CTX_FLUSH_TIMER_CNT * 10ns).
24 */
25 #define CTX_FLUSH_TIMER_CNT 0x2FAF0
26
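/* Microcode images loaded via request_firmware(), kept as a list of otx2_cpt_uc_info_t */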
27 struct fw_info_t {
28 struct list_head ucodes;
29 };
30
31 static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
32 struct otx2_cpt_eng_grp_info *eng_grp)
33 {
34 struct otx2_cpt_bitmap bmap = { {0} };
35 bool found = false;
36 int i;
37
38 if (eng_grp->g->engs_num < 0 ||
39 eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
40 dev_err(dev, "unsupported number of engines %d on octeontx2\n",
41 eng_grp->g->engs_num);
42 return bmap;
43 }
44
45 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
46 if (eng_grp->engs[i].type) {
47 bitmap_or(bmap.bits, bmap.bits,
48 eng_grp->engs[i].bmap,
49 eng_grp->g->engs_num);
50 bmap.size = eng_grp->g->engs_num;
51 found = true;
52 }
53 }
54
55 if (!found)
56 dev_err(dev, "No engines reserved for engine group %d\n",
57 eng_grp->idx);
58 return bmap;
59 }
60
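/* Check whether the given engine type bit is set in a ucode type mask */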
61 static int is_eng_type(int val, int eng_type)
62 {
63 return val & (1 << eng_type);
64 }
65
66 static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
67 {
68 if (eng_grp->ucode[1].type)
69 return true;
70 else
71 return false;
72 }
73
74 static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
75 const char *filename)
76 {
77 strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
78 }
79
80 static char *get_eng_type_str(int eng_type)
81 {
82 char *str = "unknown";
83
84 switch (eng_type) {
85 case OTX2_CPT_SE_TYPES:
86 str = "SE";
87 break;
88
89 case OTX2_CPT_IE_TYPES:
90 str = "IE";
91 break;
92
93 case OTX2_CPT_AE_TYPES:
94 str = "AE";
95 break;
96 }
97 return str;
98 }
99
100 static char *get_ucode_type_str(int ucode_type)
101 {
102 char *str = "unknown";
103
104 switch (ucode_type) {
105 case (1 << OTX2_CPT_SE_TYPES):
106 str = "SE";
107 break;
108
109 case (1 << OTX2_CPT_IE_TYPES):
110 str = "IE";
111 break;
112
113 case (1 << OTX2_CPT_AE_TYPES):
114 str = "AE";
115 break;
116
117 case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
118 str = "SE+IPSEC";
119 break;
120 }
121 return str;
122 }
123
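/*
 * Parse the microcode header to determine which engine types (SE/IE/AE)
 * the image supports. The version string must carry the "ocpt-<rid>"
 * prefix for this silicon revision, otherwise -EINVAL is returned.
 */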
124 static int get_ucode_type(struct device *dev,
125 struct otx2_cpt_ucode_hdr *ucode_hdr,
126 int *ucode_type, u16 rid)
127 {
128 char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
129 char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
130 int i, val = 0;
131 u8 nn;
132
133 strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
134 for (i = 0; i < strlen(tmp_ver_str); i++)
135 tmp_ver_str[i] = tolower(tmp_ver_str[i]);
136
137 sprintf(ver_str_prefix, "ocpt-%02d", rid);
138 if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
139 return -EINVAL;
140
141 nn = ucode_hdr->ver_num.nn;
142 if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
143 (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
144 nn == OTX2_CPT_SE_UC_TYPE3))
145 val |= 1 << OTX2_CPT_SE_TYPES;
146 if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
147 (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
148 nn == OTX2_CPT_IE_UC_TYPE3))
149 val |= 1 << OTX2_CPT_IE_TYPES;
150 if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
151 nn == OTX2_CPT_AE_UC_TYPE)
152 val |= 1 << OTX2_CPT_AE_TYPES;
153
154 *ucode_type = val;
155
156 if (!val)
157 return -EINVAL;
158
159 return 0;
160 }
161
162 static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
163 dma_addr_t dma_addr, int blkaddr)
164 {
165 return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
166 CPT_AF_EXEX_UCODE_BASE(eng),
167 (u64)dma_addr, blkaddr);
168 }
169
170 static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
171 struct otx2_cptpf_dev *cptpf, int blkaddr)
172 {
173 struct otx2_cpt_engs_rsvd *engs;
174 dma_addr_t dma_addr;
175 int i, bit, ret;
176
177 /* Set PF number for microcode fetches */
178 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
179 CPT_AF_PF_FUNC,
180 rvu_make_pcifunc(cptpf->pdev,
181 cptpf->pf_id, 0),
182 blkaddr);
183 if (ret)
184 return ret;
185
186 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
187 engs = &eng_grp->engs[i];
188 if (!engs->type)
189 continue;
190
191 dma_addr = engs->ucode->dma;
192
193 /*
194 * Set UCODE_BASE only for the cores which are not used;
195 * the other cores should already have a valid UCODE_BASE set.
196 */
197 for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
198 if (!eng_grp->g->eng_ref_cnt[bit]) {
199 ret = __write_ucode_base(cptpf, bit, dma_addr,
200 blkaddr);
201 if (ret)
202 return ret;
203 }
204 }
205 return 0;
206 }
207
208 static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
209 {
210 struct otx2_cptpf_dev *cptpf = obj;
211 int ret;
212
213 if (cptpf->has_cpt1) {
214 ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
215 if (ret)
216 return ret;
217 }
218 return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
219 }
220
221 static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
222 struct otx2_cptpf_dev *cptpf,
223 struct otx2_cpt_bitmap bmap,
224 int blkaddr)
225 {
226 int i, timeout = 10;
227 int busy, ret;
228 u64 reg = 0;
229
230 /* Detach the cores from group */
231 for_each_set_bit(i, bmap.bits, bmap.size) {
232 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
233 CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
234 if (ret)
235 return ret;
236
237 if (reg & (1ull << eng_grp->idx)) {
238 eng_grp->g->eng_ref_cnt[i]--;
239 reg &= ~(1ull << eng_grp->idx);
240
241 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
242 cptpf->pdev,
243 CPT_AF_EXEX_CTL2(i), reg,
244 blkaddr);
245 if (ret)
246 return ret;
247 }
248 }
249
250 /* Wait for cores to become idle */
251 do {
252 busy = 0;
253 usleep_range(10000, 20000);
254 if (timeout-- < 0)
255 return -EBUSY;
256
257 for_each_set_bit(i, bmap.bits, bmap.size) {
258 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
259 cptpf->pdev,
260 CPT_AF_EXEX_STS(i), &reg,
261 blkaddr);
262 if (ret)
263 return ret;
264
265 if (reg & 0x1) {
266 busy = 1;
267 break;
268 }
269 }
270 } while (busy);
271
272 /* Disable the cores only if they are not used anymore */
273 for_each_set_bit(i, bmap.bits, bmap.size) {
274 if (!eng_grp->g->eng_ref_cnt[i]) {
275 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
276 cptpf->pdev,
277 CPT_AF_EXEX_CTL(i), 0x0,
278 blkaddr);
279 if (ret)
280 return ret;
281 }
282 }
283
284 return 0;
285 }
286
287 static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
288 void *obj)
289 {
290 struct otx2_cptpf_dev *cptpf = obj;
291 struct otx2_cpt_bitmap bmap;
292 int ret;
293
294 bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
295 if (!bmap.size)
296 return -EINVAL;
297
298 if (cptpf->has_cpt1) {
299 ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
300 BLKADDR_CPT1);
301 if (ret)
302 return ret;
303 }
304 return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
305 BLKADDR_CPT0);
306 }
307
308 static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
309 struct otx2_cptpf_dev *cptpf,
310 struct otx2_cpt_bitmap bmap,
311 int blkaddr)
312 {
313 u64 reg = 0;
314 int i, ret;
315
316 /* Attach the cores to the group */
317 for_each_set_bit(i, bmap.bits, bmap.size) {
318 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
319 CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
320 if (ret)
321 return ret;
322
323 if (!(reg & (1ull << eng_grp->idx))) {
324 eng_grp->g->eng_ref_cnt[i]++;
325 reg |= 1ull << eng_grp->idx;
326
327 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
328 cptpf->pdev,
329 CPT_AF_EXEX_CTL2(i), reg,
330 blkaddr);
331 if (ret)
332 return ret;
333 }
334 }
335
336 /* Enable the cores */
337 for_each_set_bit(i, bmap.bits, bmap.size) {
338 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
339 CPT_AF_EXEX_CTL(i), 0x1,
340 blkaddr);
341 if (ret)
342 return ret;
343 }
344 return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
345 }
346
347 static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
348 void *obj)
349 {
350 struct otx2_cptpf_dev *cptpf = obj;
351 struct otx2_cpt_bitmap bmap;
352 int ret;
353
354 bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
355 if (!bmap.size)
356 return -EINVAL;
357
358 if (cptpf->has_cpt1) {
359 ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
360 BLKADDR_CPT1);
361 if (ret)
362 return ret;
363 }
364 return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
365 }
366
367 static int load_fw(struct device *dev, struct fw_info_t *fw_info,
368 char *filename, u16 rid)
369 {
370 struct otx2_cpt_ucode_hdr *ucode_hdr;
371 struct otx2_cpt_uc_info_t *uc_info;
372 int ucode_type, ucode_size;
373 int ret;
374
375 uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
376 if (!uc_info)
377 return -ENOMEM;
378
379 ret = request_firmware(&uc_info->fw, filename, dev);
380 if (ret)
381 goto free_uc_info;
382
383 ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
384 ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
385 if (ret)
386 goto release_fw;
387
388 ucode_size = ntohl(ucode_hdr->code_length) * 2;
389 if (!ucode_size) {
390 dev_err(dev, "Ucode %s invalid size\n", filename);
391 ret = -EINVAL;
392 goto release_fw;
393 }
394
395 set_ucode_filename(&uc_info->ucode, filename);
396 memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
397 OTX2_CPT_UCODE_VER_STR_SZ);
398 uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
399 uc_info->ucode.ver_num = ucode_hdr->ver_num;
400 uc_info->ucode.type = ucode_type;
401 uc_info->ucode.size = ucode_size;
402 list_add_tail(&uc_info->list, &fw_info->ucodes);
403
404 return 0;
405
406 release_fw:
407 release_firmware(uc_info->fw);
408 free_uc_info:
409 kfree(uc_info);
410 return ret;
411 }
412
413 static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
414 {
415 struct otx2_cpt_uc_info_t *curr, *temp;
416
417 if (!fw_info)
418 return;
419
420 list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
421 list_del(&curr->list);
422 release_firmware(curr->fw);
423 kfree(curr);
424 }
425 }
426
427 static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
428 int ucode_type)
429 {
430 struct otx2_cpt_uc_info_t *curr;
431
432 list_for_each_entry(curr, &fw_info->ucodes, list) {
433 if (!is_eng_type(curr->ucode.type, ucode_type))
434 continue;
435
436 return curr;
437 }
438 return NULL;
439 }
440
441 static void print_uc_info(struct fw_info_t *fw_info)
442 {
443 struct otx2_cpt_uc_info_t *curr;
444
445 list_for_each_entry(curr, &fw_info->ucodes, list) {
446 pr_debug("Ucode filename %s\n", curr->ucode.filename);
447 pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
448 pr_debug("Ucode version %d.%d.%d.%d\n",
449 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
450 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
451 pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
452 get_ucode_type_str(curr->ucode.type));
453 pr_debug("Ucode size %d\n", curr->ucode.size);
454 pr_debug("Ucode ptr %p\n", curr->fw->data);
455 }
456 }
457
458 static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
459 u16 rid)
460 {
461 char filename[OTX2_CPT_NAME_LENGTH];
462 char eng_type[8];
463 int ret, e, i;
464
465 INIT_LIST_HEAD(&fw_info->ucodes);
466
467 for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
468 strscpy(eng_type, get_eng_type_str(e));
469 for (i = 0; i < strlen(eng_type); i++)
470 eng_type[i] = tolower(eng_type[i]);
471
472 snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
473 rid, eng_type);
474 /* Request firmware for each engine type */
475 ret = load_fw(&pdev->dev, fw_info, filename, rid);
476 if (ret)
477 goto release_fw;
478 }
479 print_uc_info(fw_info);
480 return 0;
481
482 release_fw:
483 cpt_ucode_release_fw(fw_info);
484 return ret;
485 }
486
487 struct otx2_cpt_engs_rsvd *find_engines_by_type(
488 struct otx2_cpt_eng_grp_info *eng_grp,
489 int eng_type)
490 {
491 int i;
492
493 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
494 if (!eng_grp->engs[i].type)
495 continue;
496
497 if (eng_grp->engs[i].type == eng_type)
498 return &eng_grp->engs[i];
499 }
500 return NULL;
501 }
502
503 static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
504 int eng_type)
505 {
506 struct otx2_cpt_engs_rsvd *engs;
507
508 engs = find_engines_by_type(eng_grp, eng_type);
509
510 return (engs != NULL ? 1 : 0);
511 }
512
513 static int update_engines_avail_count(struct device *dev,
514 struct otx2_cpt_engs_available *avail,
515 struct otx2_cpt_engs_rsvd *engs, int val)
516 {
517 switch (engs->type) {
518 case OTX2_CPT_SE_TYPES:
519 avail->se_cnt += val;
520 break;
521
522 case OTX2_CPT_IE_TYPES:
523 avail->ie_cnt += val;
524 break;
525
526 case OTX2_CPT_AE_TYPES:
527 avail->ae_cnt += val;
528 break;
529
530 default:
531 dev_err(dev, "Invalid engine type %d\n", engs->type);
532 return -EINVAL;
533 }
534 return 0;
535 }
536
537 static int update_engines_offset(struct device *dev,
538 struct otx2_cpt_engs_available *avail,
539 struct otx2_cpt_engs_rsvd *engs)
540 {
541 switch (engs->type) {
542 case OTX2_CPT_SE_TYPES:
543 engs->offset = 0;
544 break;
545
546 case OTX2_CPT_IE_TYPES:
547 engs->offset = avail->max_se_cnt;
548 break;
549
550 case OTX2_CPT_AE_TYPES:
551 engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
552 break;
553
554 default:
555 dev_err(dev, "Invalid engine type %d\n", engs->type);
556 return -EINVAL;
557 }
558 return 0;
559 }
560
561 static int release_engines(struct device *dev,
562 struct otx2_cpt_eng_grp_info *grp)
563 {
564 int i, ret = 0;
565
566 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
567 if (!grp->engs[i].type)
568 continue;
569
570 if (grp->engs[i].count > 0) {
571 ret = update_engines_avail_count(dev, &grp->g->avail,
572 &grp->engs[i],
573 grp->engs[i].count);
574 if (ret)
575 return ret;
576 }
577
578 grp->engs[i].type = 0;
579 grp->engs[i].count = 0;
580 grp->engs[i].offset = 0;
581 grp->engs[i].ucode = NULL;
582 bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
583 }
584 return 0;
585 }
586
587 static int do_reserve_engines(struct device *dev,
588 struct otx2_cpt_eng_grp_info *grp,
589 struct otx2_cpt_engines *req_engs)
590 {
591 struct otx2_cpt_engs_rsvd *engs = NULL;
592 int i, ret;
593
594 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
595 if (!grp->engs[i].type) {
596 engs = &grp->engs[i];
597 break;
598 }
599 }
600
601 if (!engs)
602 return -ENOMEM;
603
604 engs->type = req_engs->type;
605 engs->count = req_engs->count;
606
607 ret = update_engines_offset(dev, &grp->g->avail, engs);
608 if (ret)
609 return ret;
610
611 if (engs->count > 0) {
612 ret = update_engines_avail_count(dev, &grp->g->avail, engs,
613 -engs->count);
614 if (ret)
615 return ret;
616 }
617
618 return 0;
619 }
620
621 static int check_engines_availability(struct device *dev,
622 struct otx2_cpt_eng_grp_info *grp,
623 struct otx2_cpt_engines *req_eng)
624 {
625 int avail_cnt = 0;
626
627 switch (req_eng->type) {
628 case OTX2_CPT_SE_TYPES:
629 avail_cnt = grp->g->avail.se_cnt;
630 break;
631
632 case OTX2_CPT_IE_TYPES:
633 avail_cnt = grp->g->avail.ie_cnt;
634 break;
635
636 case OTX2_CPT_AE_TYPES:
637 avail_cnt = grp->g->avail.ae_cnt;
638 break;
639
640 default:
641 dev_err(dev, "Invalid engine type %d\n", req_eng->type);
642 return -EINVAL;
643 }
644
645 if (avail_cnt < req_eng->count) {
646 dev_err(dev,
647 "Error available %s engines %d < than requested %d\n",
648 get_eng_type_str(req_eng->type),
649 avail_cnt, req_eng->count);
650 return -EBUSY;
651 }
652 return 0;
653 }
654
655 static int reserve_engines(struct device *dev,
656 struct otx2_cpt_eng_grp_info *grp,
657 struct otx2_cpt_engines *req_engs, int ucodes_cnt)
658 {
659 int i, ret = 0;
660
661 /* Validate that the requested number of engines is available */
662 for (i = 0; i < ucodes_cnt; i++) {
663 ret = check_engines_availability(dev, grp, &req_engs[i]);
664 if (ret)
665 return ret;
666 }
667
668 /* Reserve requested engines for this engine group */
669 for (i = 0; i < ucodes_cnt; i++) {
670 ret = do_reserve_engines(dev, grp, &req_engs[i]);
671 if (ret)
672 return ret;
673 }
674 return 0;
675 }
676
677 static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
678 {
679 if (ucode->va) {
680 dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
681 ucode->dma);
682 ucode->va = NULL;
683 ucode->dma = 0;
684 ucode->size = 0;
685 }
686
687 memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
688 memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
689 set_ucode_filename(ucode, "");
690 ucode->type = 0;
691 }
692
693 static int copy_ucode_to_dma_mem(struct device *dev,
694 struct otx2_cpt_ucode *ucode,
695 const u8 *ucode_data)
696 {
697 u32 i;
698
699 /* Allocate DMAable space */
700 ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
701 GFP_KERNEL);
702 if (!ucode->va)
703 return -ENOMEM;
704
705 memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
706 ucode->size);
707
708 /* Byte swap 64-bit */
709 for (i = 0; i < (ucode->size / 8); i++)
710 cpu_to_be64s(&((u64 *)ucode->va)[i]);
711 /* Ucode needs 16-bit swap */
712 for (i = 0; i < (ucode->size / 2); i++)
713 cpu_to_be16s(&((u16 *)ucode->va)[i]);
714 return 0;
715 }
716
717 static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
718 void *obj)
719 {
720 int ret;
721
722 /* Point microcode to each core of the group */
723 ret = cpt_set_ucode_base(eng_grp, obj);
724 if (ret)
725 return ret;
726
727 /* Attach the cores to the group and enable them */
728 ret = cpt_attach_and_enable_cores(eng_grp, obj);
729
730 return ret;
731 }
732
733 static int disable_eng_grp(struct device *dev,
734 struct otx2_cpt_eng_grp_info *eng_grp,
735 void *obj)
736 {
737 int i, ret;
738
739 /* Disable all engines used by this group */
740 ret = cpt_detach_and_disable_cores(eng_grp, obj);
741 if (ret)
742 return ret;
743
744 /* Unload ucode used by this engine group */
745 ucode_unload(dev, &eng_grp->ucode[0]);
746 ucode_unload(dev, &eng_grp->ucode[1]);
747
748 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
749 if (!eng_grp->engs[i].type)
750 continue;
751
752 eng_grp->engs[i].ucode = &eng_grp->ucode[0];
753 }
754
755 /* Clear UCODE_BASE register for each engine used by this group */
756 ret = cpt_set_ucode_base(eng_grp, obj);
757
758 return ret;
759 }
760
761 static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
762 struct otx2_cpt_eng_grp_info *src_grp)
763 {
764 /* Setup fields for engine group which is mirrored */
765 src_grp->mirror.is_ena = false;
766 src_grp->mirror.idx = 0;
767 src_grp->mirror.ref_count++;
768
769 /* Setup fields for mirroring engine group */
770 dst_grp->mirror.is_ena = true;
771 dst_grp->mirror.idx = src_grp->idx;
772 dst_grp->mirror.ref_count = 0;
773 }
774
775 static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
776 {
777 struct otx2_cpt_eng_grp_info *src_grp;
778
779 if (!dst_grp->mirror.is_ena)
780 return;
781
782 src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
783
784 src_grp->mirror.ref_count--;
785 dst_grp->mirror.is_ena = false;
786 dst_grp->mirror.idx = 0;
787 dst_grp->mirror.ref_count = 0;
788 }
789
790 static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
791 struct otx2_cpt_engines *engs, int engs_cnt)
792 {
793 struct otx2_cpt_engs_rsvd *mirrored_engs;
794 int i;
795
796 for (i = 0; i < engs_cnt; i++) {
797 mirrored_engs = find_engines_by_type(mirror_eng_grp,
798 engs[i].type);
799 if (!mirrored_engs)
800 continue;
801
802 /*
803 * If mirrored group has this type of engines attached then
804 * there are 3 scenarios possible:
805 * 1) mirrored_engs.count == engs[i].count then all engines
806 * from mirrored engine group will be shared with this engine
807 * group
808 * 2) mirrored_engs.count > engs[i].count then only a subset of
809 * engines from mirrored engine group will be shared with this
810 * engine group
811 * 3) mirrored_engs.count < engs[i].count then all engines
812 * from mirrored engine group will be shared with this group
813 * and additional engines will be reserved for exclusive use
814 * by this engine group
815 */
816 engs[i].count -= mirrored_engs->count;
817 }
818 }
819
820 static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
821 struct otx2_cpt_eng_grp_info *grp)
822 {
823 struct otx2_cpt_eng_grps *eng_grps = grp->g;
824 int i;
825
826 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
827 if (!eng_grps->grp[i].is_enabled)
828 continue;
829 if (eng_grps->grp[i].ucode[0].type &&
830 eng_grps->grp[i].ucode[1].type)
831 continue;
832 if (grp->idx == i)
833 continue;
834 if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
835 grp->ucode[0].ver_str,
836 OTX2_CPT_UCODE_VER_STR_SZ))
837 return &eng_grps->grp[i];
838 }
839
840 return NULL;
841 }
842
843 static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
844 struct otx2_cpt_eng_grps *eng_grps)
845 {
846 int i;
847
848 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
849 if (!eng_grps->grp[i].is_enabled)
850 return &eng_grps->grp[i];
851 }
852 return NULL;
853 }
854
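/*
 * Build the engine bitmap for each engine type in the group: claim unused
 * engines from that type's range, then OR in the engines shared from the
 * mirrored group when mirroring is enabled.
 */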
855 static int eng_grp_update_masks(struct device *dev,
856 struct otx2_cpt_eng_grp_info *eng_grp)
857 {
858 struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
859 struct otx2_cpt_bitmap tmp_bmap = { {0} };
860 int i, j, cnt, max_cnt;
861 int bit;
862
863 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
864 engs = &eng_grp->engs[i];
865 if (!engs->type)
866 continue;
867 if (engs->count <= 0)
868 continue;
869
870 switch (engs->type) {
871 case OTX2_CPT_SE_TYPES:
872 max_cnt = eng_grp->g->avail.max_se_cnt;
873 break;
874
875 case OTX2_CPT_IE_TYPES:
876 max_cnt = eng_grp->g->avail.max_ie_cnt;
877 break;
878
879 case OTX2_CPT_AE_TYPES:
880 max_cnt = eng_grp->g->avail.max_ae_cnt;
881 break;
882
883 default:
884 dev_err(dev, "Invalid engine type %d\n", engs->type);
885 return -EINVAL;
886 }
887
888 cnt = engs->count;
889 WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
890 bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
891 for (j = engs->offset; j < engs->offset + max_cnt; j++) {
892 if (!eng_grp->g->eng_ref_cnt[j]) {
893 bitmap_set(tmp_bmap.bits, j, 1);
894 cnt--;
895 if (!cnt)
896 break;
897 }
898 }
899
900 if (cnt)
901 return -ENOSPC;
902
903 bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
904 }
905
906 if (!eng_grp->mirror.is_ena)
907 return 0;
908
909 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
910 engs = &eng_grp->engs[i];
911 if (!engs->type)
912 continue;
913
914 mirrored_engs = find_engines_by_type(
915 &eng_grp->g->grp[eng_grp->mirror.idx],
916 engs->type);
917 WARN_ON(!mirrored_engs && engs->count <= 0);
918 if (!mirrored_engs)
919 continue;
920
921 bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
922 eng_grp->g->engs_num);
923 if (engs->count < 0) {
924 bit = find_first_bit(mirrored_engs->bmap,
925 eng_grp->g->engs_num);
926 bitmap_clear(tmp_bmap.bits, bit, -engs->count);
927 }
928 bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
929 eng_grp->g->engs_num);
930 }
931 return 0;
932 }
933
934 static int delete_engine_group(struct device *dev,
935 struct otx2_cpt_eng_grp_info *eng_grp)
936 {
937 int ret;
938
939 if (!eng_grp->is_enabled)
940 return 0;
941
942 if (eng_grp->mirror.ref_count)
943 return -EINVAL;
944
945 /* Remove engine group mirroring if enabled */
946 remove_eng_grp_mirroring(eng_grp);
947
948 /* Disable engine group */
949 ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
950 if (ret)
951 return ret;
952
953 /* Release all engines held by this engine group */
954 ret = release_engines(dev, eng_grp);
955 if (ret)
956 return ret;
957
958 eng_grp->is_enabled = false;
959
960 return 0;
961 }
962
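/*
 * Point each engine type at the ucode it will run: the mirrored group's
 * ucode when mirroring is enabled, this group's own ucode otherwise.
 */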
963 static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
964 {
965 struct otx2_cpt_ucode *ucode;
966
967 if (eng_grp->mirror.is_ena)
968 ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
969 else
970 ucode = &eng_grp->ucode[0];
971 WARN_ON(!eng_grp->engs[0].type);
972 eng_grp->engs[0].ucode = ucode;
973
974 if (eng_grp->engs[1].type) {
975 if (is_2nd_ucode_used(eng_grp))
976 eng_grp->engs[1].ucode = &eng_grp->ucode[1];
977 else
978 eng_grp->engs[1].ucode = ucode;
979 }
980 }
981
982 static int create_engine_group(struct device *dev,
983 struct otx2_cpt_eng_grps *eng_grps,
984 struct otx2_cpt_engines *engs, int ucodes_cnt,
985 void *ucode_data[], int is_print)
986 {
987 struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
988 struct otx2_cpt_eng_grp_info *eng_grp;
989 struct otx2_cpt_uc_info_t *uc_info;
990 int i, ret = 0;
991
992 /* Find engine group which is not used */
993 eng_grp = find_unused_eng_grp(eng_grps);
994 if (!eng_grp) {
995 dev_err(dev, "Error all engine groups are being used\n");
996 return -ENOSPC;
997 }
998 /* Load ucode */
999 for (i = 0; i < ucodes_cnt; i++) {
1000 uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
1001 eng_grp->ucode[i] = uc_info->ucode;
1002 ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
1003 uc_info->fw->data);
1004 if (ret)
1005 goto unload_ucode;
1006 }
1007
1008 /* Check if this group mirrors another existing engine group */
1009 mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
1010 if (mirrored_eng_grp) {
1011 /* Setup mirroring */
1012 setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
1013
1014 /*
1015 * Update the count of requested engines because some
1016 * of them might be shared with the mirrored group.
1017 */
1018 update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
1019 }
1020 ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
1021 if (ret)
1022 goto unload_ucode;
1023
1024 /* Update ucode pointers used by engines */
1025 update_ucode_ptrs(eng_grp);
1026
1027 /* Update engine masks used by this group */
1028 ret = eng_grp_update_masks(dev, eng_grp);
1029 if (ret)
1030 goto release_engs;
1031
1032 /* Enable engine group */
1033 ret = enable_eng_grp(eng_grp, eng_grps->obj);
1034 if (ret)
1035 goto release_engs;
1036
1037 /*
1038 * If this engine group mirrors another engine group
1039 * then we need to unload its ucode as we will use the ucode
1040 * from the mirrored engine group.
1041 */
1042 if (eng_grp->mirror.is_ena)
1043 ucode_unload(dev, &eng_grp->ucode[0]);
1044
1045 eng_grp->is_enabled = true;
1046
1047 if (!is_print)
1048 return 0;
1049
1050 if (mirrored_eng_grp)
1051 dev_info(dev,
1052 "Engine_group%d: reuse microcode %s from group %d\n",
1053 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
1054 mirrored_eng_grp->idx);
1055 else
1056 dev_info(dev, "Engine_group%d: microcode loaded %s\n",
1057 eng_grp->idx, eng_grp->ucode[0].ver_str);
1058 if (is_2nd_ucode_used(eng_grp))
1059 dev_info(dev, "Engine_group%d: microcode loaded %s\n",
1060 eng_grp->idx, eng_grp->ucode[1].ver_str);
1061
1062 return 0;
1063
1064 release_engs:
1065 release_engines(dev, eng_grp);
1066 unload_ucode:
1067 ucode_unload(dev, &eng_grp->ucode[0]);
1068 ucode_unload(dev, &eng_grp->ucode[1]);
1069 return ret;
1070 }
1071
1072 static void delete_engine_grps(struct pci_dev *pdev,
1073 struct otx2_cpt_eng_grps *eng_grps)
1074 {
1075 int i;
1076
1077 /* First delete all mirroring engine groups */
1078 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1079 if (eng_grps->grp[i].mirror.is_ena)
1080 delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1081
1082 /* Delete remaining engine groups */
1083 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1084 delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1085 }
1086
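/* CN10K RNM device ID and entropy status register used by the errata fixup below */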
1087 #define PCI_DEVID_CN10K_RNM 0xA098
1088 #define RNM_ENTROPY_STATUS 0x8
1089
1090 static void rnm_to_cpt_errata_fixup(struct device *dev)
1091 {
1092 struct pci_dev *pdev;
1093 void __iomem *base;
1094 int timeout = 5000;
1095
1096 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
1097 if (!pdev)
1098 return;
1099
1100 base = pci_ioremap_bar(pdev, 0);
1101 if (!base)
1102 goto put_pdev;
1103
1104 while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
1105 cpu_relax();
1106 udelay(1);
1107 timeout--;
1108 if (!timeout) {
1109 dev_warn(dev, "RNM is not producing entropy\n");
1110 break;
1111 }
1112 }
1113
1114 iounmap(base);
1115
1116 put_pdev:
1117 pci_dev_put(pdev);
1118 }
1119
1120 int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
1121 {
1122
1123 int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
1124 struct otx2_cpt_eng_grp_info *grp;
1125 int i;
1126
1127 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1128 grp = &eng_grps->grp[i];
1129 if (!grp->is_enabled)
1130 continue;
1131
1132 if (eng_type == OTX2_CPT_SE_TYPES) {
1133 if (eng_grp_has_eng_type(grp, eng_type) &&
1134 !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
1135 eng_grp_num = i;
1136 break;
1137 }
1138 } else {
1139 if (eng_grp_has_eng_type(grp, eng_type)) {
1140 eng_grp_num = i;
1141 break;
1142 }
1143 }
1144 }
1145 return eng_grp_num;
1146 }
1147
1148 int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
1149 struct otx2_cpt_eng_grps *eng_grps)
1150 {
1151 struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
1152 struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1153 struct pci_dev *pdev = cptpf->pdev;
1154 struct fw_info_t fw_info;
1155 u64 reg_val;
1156 int ret = 0;
1157
1158 mutex_lock(&eng_grps->lock);
1159 /*
1160 * We don't create the engine groups if they were already
1161 * created (when the user enabled VFs for the first time).
1162 */
1163 if (eng_grps->is_grps_created)
1164 goto unlock;
1165
1166 ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
1167 if (ret)
1168 goto unlock;
1169
1170 /*
1171 * Create engine group with SE engines for kernel
1172 * crypto functionality (symmetric crypto)
1173 */
1174 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1175 if (uc_info[0] == NULL) {
1176 dev_err(&pdev->dev, "Unable to find firmware for SE\n");
1177 ret = -EINVAL;
1178 goto release_fw;
1179 }
1180 engs[0].type = OTX2_CPT_SE_TYPES;
1181 engs[0].count = eng_grps->avail.max_se_cnt;
1182
1183 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1184 (void **) uc_info, 1);
1185 if (ret)
1186 goto release_fw;
1187
1188 /*
1189 * Create engine group with SE+IE engines for IPSec.
1190 * All SE engines will be shared with engine group 0.
1191 */
1192 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1193 uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
1194
1195 if (uc_info[1] == NULL) {
1196 dev_err(&pdev->dev, "Unable to find firmware for IE");
1197 ret = -EINVAL;
1198 goto delete_eng_grp;
1199 }
1200 engs[0].type = OTX2_CPT_SE_TYPES;
1201 engs[0].count = eng_grps->avail.max_se_cnt;
1202 engs[1].type = OTX2_CPT_IE_TYPES;
1203 engs[1].count = eng_grps->avail.max_ie_cnt;
1204
1205 ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
1206 (void **) uc_info, 1);
1207 if (ret)
1208 goto delete_eng_grp;
1209
1210 /*
1211 * Create engine group with AE engines for asymmetric
1212 * crypto functionality.
1213 */
1214 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
1215 if (uc_info[0] == NULL) {
1216 dev_err(&pdev->dev, "Unable to find firmware for AE");
1217 ret = -EINVAL;
1218 goto delete_eng_grp;
1219 }
1220 engs[0].type = OTX2_CPT_AE_TYPES;
1221 engs[0].count = eng_grps->avail.max_ae_cnt;
1222
1223 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1224 (void **) uc_info, 1);
1225 if (ret)
1226 goto delete_eng_grp;
1227
1228 eng_grps->is_grps_created = true;
1229
1230 cpt_ucode_release_fw(&fw_info);
1231
1232 if (is_dev_otx2(pdev))
1233 goto unlock;
1234
1235 /*
1236 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
1237 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
1238 */
1239 rnm_to_cpt_errata_fixup(&pdev->dev);
1240
1241 otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
1242 BLKADDR_CPT0);
1243 /*
1244 * Configure engine group mask to allow context prefetching
1245 * for the groups and enable random number request, to enable
1246 * CPT to request random numbers from RNM.
1247 */
1248 reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
1249 otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
1250 reg_val, BLKADDR_CPT0);
1251 /*
1252 * Set interval to periodically flush dirty data for the next
1253 * CTX cache entry. Set the interval count to maximum supported
1254 * value.
1255 */
1256 otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
1257 CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
1258
1259 /*
1260 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW errata: when
1261 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
1262 * encounters a fault/poison, a rare case may result in
1263 * unpredictable data being delivered to a CPT engine.
1264 */
1265 if (cpt_is_errata_38550_exists(pdev)) {
1266 otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
1267 &reg_val, BLKADDR_CPT0);
1268 otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
1269 reg_val | BIT_ULL(24), BLKADDR_CPT0);
1270 }
1271
1272 mutex_unlock(&eng_grps->lock);
1273 return 0;
1274
1275 delete_eng_grp:
1276 delete_engine_grps(pdev, eng_grps);
1277 release_fw:
1278 cpt_ucode_release_fw(&fw_info);
1279 unlock:
1280 mutex_unlock(&eng_grps->lock);
1281 return ret;
1282 }
1283
1284 static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
1285 int blkaddr)
1286 {
1287 int timeout = 10, ret;
1288 int i, busy;
1289 u64 reg;
1290
1291 /* Disengage the cores from groups */
1292 for (i = 0; i < total_cores; i++) {
1293 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1294 CPT_AF_EXEX_CTL2(i), 0x0,
1295 blkaddr);
1296 if (ret)
1297 return ret;
1298
1299 cptpf->eng_grps.eng_ref_cnt[i] = 0;
1300 }
1301 ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
1302 if (ret)
1303 return ret;
1304
1305 /* Wait for cores to become idle */
1306 do {
1307 busy = 0;
1308 usleep_range(10000, 20000);
1309 if (timeout-- < 0)
1310 return -EBUSY;
1311
1312 for (i = 0; i < total_cores; i++) {
1313 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
1314 cptpf->pdev,
1315 CPT_AF_EXEX_STS(i), &reg,
1316 blkaddr);
1317 if (ret)
1318 return ret;
1319
1320 if (reg & 0x1) {
1321 busy = 1;
1322 break;
1323 }
1324 }
1325 } while (busy);
1326
1327 /* Disable the cores */
1328 for (i = 0; i < total_cores; i++) {
1329 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1330 CPT_AF_EXEX_CTL(i), 0x0,
1331 blkaddr);
1332 if (ret)
1333 return ret;
1334 }
1335 return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
1336 }
1337
1338 int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
1339 {
1340 int total_cores, ret;
1341
1342 total_cores = cptpf->eng_grps.avail.max_se_cnt +
1343 cptpf->eng_grps.avail.max_ie_cnt +
1344 cptpf->eng_grps.avail.max_ae_cnt;
1345
1346 if (cptpf->has_cpt1) {
1347 ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
1348 if (ret)
1349 return ret;
1350 }
1351 return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
1352 }
1353
1354 void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1355 struct otx2_cpt_eng_grps *eng_grps)
1356 {
1357 struct otx2_cpt_eng_grp_info *grp;
1358 int i, j;
1359
1360 mutex_lock(&eng_grps->lock);
1361 delete_engine_grps(pdev, eng_grps);
1362 /* Release memory */
1363 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1364 grp = &eng_grps->grp[i];
1365 for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1366 kfree(grp->engs[j].bmap);
1367 grp->engs[j].bmap = NULL;
1368 }
1369 }
1370 mutex_unlock(&eng_grps->lock);
1371 }
1372
1373 int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
1374 struct otx2_cpt_eng_grps *eng_grps)
1375 {
1376 struct otx2_cpt_eng_grp_info *grp;
1377 int i, j, ret;
1378
1379 mutex_init(&eng_grps->lock);
1380 eng_grps->obj = pci_get_drvdata(pdev);
1381 eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
1382 eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
1383 eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
1384
1385 eng_grps->engs_num = eng_grps->avail.max_se_cnt +
1386 eng_grps->avail.max_ie_cnt +
1387 eng_grps->avail.max_ae_cnt;
1388 if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
1389 dev_err(&pdev->dev,
1390 "Number of engines %d > than max supported %d\n",
1391 eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
1392 ret = -EINVAL;
1393 goto cleanup_eng_grps;
1394 }
1395
1396 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1397 grp = &eng_grps->grp[i];
1398 grp->g = eng_grps;
1399 grp->idx = i;
1400
1401 for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1402 grp->engs[j].bmap =
1403 kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
1404 sizeof(long), GFP_KERNEL);
1405 if (!grp->engs[j].bmap) {
1406 ret = -ENOMEM;
1407 goto cleanup_eng_grps;
1408 }
1409 }
1410 }
1411 return 0;
1412
1413 cleanup_eng_grps:
1414 otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
1415 return ret;
1416 }
1417
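/*
 * Create a minimal two-engine group for each engine type so that a
 * LOAD_FVC instruction can be issued to every type during capability
 * discovery; the groups are deleted again once discovery completes.
 */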
1418 static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
1419 struct otx2_cpt_eng_grps *eng_grps)
1420 {
1421 struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
1422 struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1423 struct fw_info_t fw_info;
1424 int ret;
1425
1426 mutex_lock(&eng_grps->lock);
1427 ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
1428 if (ret) {
1429 mutex_unlock(&eng_grps->lock);
1430 return ret;
1431 }
1432
1433 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
1434 if (uc_info[0] == NULL) {
1435 dev_err(&pdev->dev, "Unable to find firmware for AE\n");
1436 ret = -EINVAL;
1437 goto release_fw;
1438 }
1439 engs[0].type = OTX2_CPT_AE_TYPES;
1440 engs[0].count = 2;
1441
1442 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1443 (void **) uc_info, 0);
1444 if (ret)
1445 goto release_fw;
1446
1447 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1448 if (uc_info[0] == NULL) {
1449 dev_err(&pdev->dev, "Unable to find firmware for SE\n");
1450 ret = -EINVAL;
1451 goto delete_eng_grp;
1452 }
1453 engs[0].type = OTX2_CPT_SE_TYPES;
1454 engs[0].count = 2;
1455
1456 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1457 (void **) uc_info, 0);
1458 if (ret)
1459 goto delete_eng_grp;
1460
1461 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
1462 if (uc_info[0] == NULL) {
1463 dev_err(&pdev->dev, "Unable to find firmware for IE\n");
1464 ret = -EINVAL;
1465 goto delete_eng_grp;
1466 }
1467 engs[0].type = OTX2_CPT_IE_TYPES;
1468 engs[0].count = 2;
1469
1470 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1471 (void **) uc_info, 0);
1472 if (ret)
1473 goto delete_eng_grp;
1474
1475 cpt_ucode_release_fw(&fw_info);
1476 mutex_unlock(&eng_grps->lock);
1477 return 0;
1478
1479 delete_eng_grp:
1480 delete_engine_grps(pdev, eng_grps);
1481 release_fw:
1482 cpt_ucode_release_fw(&fw_info);
1483 mutex_unlock(&eng_grps->lock);
1484 return ret;
1485 }
1486
1487 /*
1488 * Get CPT HW capabilities using LOAD_FVC operation.
1489 */
1490 int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
1491 {
1492 struct otx2_cptlfs_info *lfs = &cptpf->lfs;
1493 struct otx2_cpt_iq_command iq_cmd;
1494 union otx2_cpt_opcode opcode;
1495 union otx2_cpt_res_s *result;
1496 union otx2_cpt_inst_s inst;
1497 dma_addr_t result_baddr;
1498 dma_addr_t rptr_baddr;
1499 struct pci_dev *pdev;
1500 int timeout = 10000;
1501 void *base, *rptr;
1502 int ret, etype;
1503 u32 len;
1504
1505 /*
1506 * We don't rediscover the capabilities if this was already done
1507 * (when the user enabled VFs for the first time).
1508 */
1509 if (cptpf->is_eng_caps_discovered)
1510 return 0;
1511
1512 pdev = cptpf->pdev;
1513 /*
1514 * Create engine groups for each type to submit LOAD_FVC op and
1515 * get engine's capabilities.
1516 */
1517 ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
1518 if (ret)
1519 goto delete_grps;
1520
1521 ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
1522 OTX2_CPT_QUEUE_HI_PRIO, 1);
1523 if (ret)
1524 goto delete_grps;
1525
1526 /* Allocate extra memory for "rptr" and "result" pointer alignment */
1527 len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
1528 sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;
1529
1530 base = kzalloc(len, GFP_KERNEL);
1531 if (!base) {
1532 ret = -ENOMEM;
1533 goto lf_cleanup;
1534 }
1535
1536 rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
1537 rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
1538 if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
1539 dev_err(&pdev->dev, "DMA mapping failed\n");
1540 ret = -EFAULT;
1541 goto free_rptr;
1542 }
1543
1544 result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
1545 OTX2_CPT_RES_ADDR_ALIGN);
1546 result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
1547 OTX2_CPT_RES_ADDR_ALIGN);
1548
1549 /* Fill in the command */
1550 opcode.s.major = LOADFVC_MAJOR_OP;
1551 opcode.s.minor = LOADFVC_MINOR_OP;
1552
1553 iq_cmd.cmd.u = 0;
1554 iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
1555
1556 /* 64-bit swap for microcode data reads, not needed for addresses */
1557 cpu_to_be64s(&iq_cmd.cmd.u);
1558 iq_cmd.dptr = 0;
1559 iq_cmd.rptr = rptr_baddr;
1560 iq_cmd.cptr.u = 0;
1561
1562 for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
1563 result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
1564 iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
1565 etype);
1566 otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
1567 lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
1568 timeout = 10000;
1569
1570 while (lfs->ops->cpt_get_compcode(result) ==
1571 OTX2_CPT_COMPLETION_CODE_INIT) {
1572 cpu_relax();
1573 udelay(1);
1574 timeout--;
1575 if (!timeout) {
1576 ret = -ENODEV;
1577 cptpf->is_eng_caps_discovered = false;
1578 dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
1579 goto error_no_response;
1580 }
1581 }
1582
1583 cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
1584 }
1585 cptpf->is_eng_caps_discovered = true;
1586
1587 error_no_response:
1588 dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
1589 free_rptr:
1590 kfree(base);
1591 lf_cleanup:
1592 otx2_cptlf_shutdown(lfs);
1593 delete_grps:
1594 delete_engine_grps(pdev, &cptpf->eng_grps);
1595
1596 return ret;
1597 }
1598
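/*
 * Create a custom engine group from a devlink parameter string made of
 * ';'-separated fields, each being either an engine spec ("se:<count>",
 * "ie:<count>", "ae:<count>") or a microcode file name (at most two).
 */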
1599 int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
1600 struct devlink_param_gset_ctx *ctx)
1601 {
1602 struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
1603 struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
1604 struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1605 char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
1606 char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
1607 struct device *dev = &cptpf->pdev->dev;
1608 char *start, *val, *err_msg, *tmp;
1609 int grp_idx = 0, ret = -EINVAL;
1610 bool has_se, has_ie, has_ae;
1611 struct fw_info_t fw_info;
1612 int ucode_idx = 0;
1613
1614 if (!eng_grps->is_grps_created) {
1615 dev_err(dev, "Not allowed before creating the default groups\n");
1616 return -EINVAL;
1617 }
1618 err_msg = "Invalid engine group format";
1619 strscpy(tmp_buf, ctx->val.vstr);
1620 start = tmp_buf;
1621
1622 has_se = has_ie = has_ae = false;
1623
1624 for (;;) {
1625 val = strsep(&start, ";");
1626 if (!val)
1627 break;
1628 val = strim(val);
1629 if (!*val)
1630 continue;
1631
1632 if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
1633 if (has_se || ucode_idx)
1634 goto err_print;
1635 tmp = strsep(&val, ":");
1636 if (!tmp)
1637 goto err_print;
1638 tmp = strim(tmp);
1639 if (!val)
1640 goto err_print;
1641 if (strlen(tmp) != 2)
1642 goto err_print;
1643 if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1644 goto err_print;
1645 engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
1646 has_se = true;
1647 } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
1648 if (has_ae || ucode_idx)
1649 goto err_print;
1650 tmp = strsep(&val, ":");
1651 if (!tmp)
1652 goto err_print;
1653 tmp = strim(tmp);
1654 if (!val)
1655 goto err_print;
1656 if (strlen(tmp) != 2)
1657 goto err_print;
1658 if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1659 goto err_print;
1660 engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
1661 has_ae = true;
1662 } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
1663 if (has_ie || ucode_idx)
1664 goto err_print;
1665 tmp = strsep(&val, ":");
1666 if (!tmp)
1667 goto err_print;
1668 tmp = strim(tmp);
1669 if (!val)
1670 goto err_print;
1671 if (strlen(tmp) != 2)
1672 goto err_print;
1673 if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1674 goto err_print;
1675 engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
1676 has_ie = true;
1677 } else {
1678 if (ucode_idx > 1)
1679 goto err_print;
1680 if (!strlen(val))
1681 goto err_print;
1682 if (strnstr(val, " ", strlen(val)))
1683 goto err_print;
1684 ucode_filename[ucode_idx++] = val;
1685 }
1686 }
1687
1688 /* Validate input parameters */
1689 if (!(grp_idx && ucode_idx))
1690 goto err_print;
1691
1692 if (ucode_idx > 1 && grp_idx < 2)
1693 goto err_print;
1694
1695 if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
1696 err_msg = "Error max 2 engine types can be attached";
1697 goto err_print;
1698 }
1699
1700 if (grp_idx > 1) {
1701 if ((engs[0].type + engs[1].type) !=
1702 (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
1703 err_msg = "Only combination of SE+IE engines is allowed";
1704 goto err_print;
1705 }
1706 /* Keep SE engines at zero index */
1707 if (engs[1].type == OTX2_CPT_SE_TYPES)
1708 swap(engs[0], engs[1]);
1709 }
1710 mutex_lock(&eng_grps->lock);
1711
1712 if (cptpf->enabled_vfs) {
1713 dev_err(dev, "Disable VFs before modifying engine groups\n");
1714 ret = -EACCES;
1715 goto err_unlock;
1716 }
1717 INIT_LIST_HEAD(&fw_info.ucodes);
1718
1719 ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
1720 if (ret) {
1721 dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
1722 goto err_unlock;
1723 }
1724 if (ucode_idx > 1) {
1725 ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
1726 if (ret) {
1727 dev_err(dev, "Unable to load firmware %s\n",
1728 ucode_filename[1]);
1729 goto release_fw;
1730 }
1731 }
1732 uc_info[0] = get_ucode(&fw_info, engs[0].type);
1733 if (uc_info[0] == NULL) {
1734 dev_err(dev, "Unable to find firmware for %s\n",
1735 get_eng_type_str(engs[0].type));
1736 ret = -EINVAL;
1737 goto release_fw;
1738 }
1739 if (ucode_idx > 1) {
1740 uc_info[1] = get_ucode(&fw_info, engs[1].type);
1741 if (uc_info[1] == NULL) {
1742 dev_err(dev, "Unable to find firmware for %s\n",
1743 get_eng_type_str(engs[1].type));
1744 ret = -EINVAL;
1745 goto release_fw;
1746 }
1747 }
1748 ret = create_engine_group(dev, eng_grps, engs, grp_idx,
1749 (void **)uc_info, 1);
1750
1751 release_fw:
1752 cpt_ucode_release_fw(&fw_info);
1753 err_unlock:
1754 mutex_unlock(&eng_grps->lock);
1755 return ret;
1756 err_print:
1757 dev_err(dev, "%s\n", err_msg);
1758 return ret;
1759 }
1760
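/* Delete the engine group named by an "egrp:<index>" devlink parameter string */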
1761 int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
1762 struct devlink_param_gset_ctx *ctx)
1763 {
1764 struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1765 struct device *dev = &cptpf->pdev->dev;
1766 char *tmp, *err_msg;
1767 int egrp;
1768 int ret;
1769
1770 err_msg = "Invalid input string format(ex: egrp:0)";
1771 if (strncasecmp(ctx->val.vstr, "egrp", 4))
1772 goto err_print;
1773 tmp = ctx->val.vstr;
1774 strsep(&tmp, ":");
1775 if (!tmp)
1776 goto err_print;
1777 if (kstrtoint(tmp, 10, &egrp))
1778 goto err_print;
1779
1780 if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
1781 dev_err(dev, "Invalid engine group %d", egrp);
1782 return -EINVAL;
1783 }
1784 if (!eng_grps->grp[egrp].is_enabled) {
1785 dev_err(dev, "Error engine_group%d is not configured", egrp);
1786 return -EINVAL;
1787 }
1788 mutex_lock(&eng_grps->lock);
1789 ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
1790 mutex_unlock(&eng_grps->lock);
1791
1792 return ret;
1793
1794 err_print:
1795 dev_err(dev, "%s\n", err_msg);
1796 return -EINVAL;
1797 }
1798