/* linux/drivers/crypto/cavium/nitrox/nitrox_hal.c (revision 0b8061c340b643e01da431dd60c75a41bb1d31ec) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"

#define PLL_REF_CLK 50
#define MAX_CSR_RETRIES 10

/**
 * emu_enable_cores - Enable EMU cluster cores.
 * @ndev: NITROX device
 */
static void emu_enable_cores(struct nitrox_device *ndev)
{
	union emu_se_enable emu_se;
	union emu_ae_enable emu_ae;
	int i;

	/* 20 AE cores per cluster */
	emu_ae.value = 0;
	emu_ae.s.enable = 0xfffff;

	/* 16 SE cores per cluster */
	emu_se.value = 0;
	emu_se.s.enable = 0xffff;

	/* enable the cores in each cluster */
	for (i = 0; i < NR_CLUSTERS; i++) {
		nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
		nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
	}
}

/**
 * nitrox_config_emu_unit - configure EMU unit.
 * @ndev: NITROX device
 */
void nitrox_config_emu_unit(struct nitrox_device *ndev)
{
	union emu_wd_int_ena_w1s emu_wd_int;
	union emu_ge_int_ena_w1s emu_ge_int;
	u64 offset;
	int i;

	/* enable cores */
	emu_enable_cores(ndev);

	/* enable general error and watchdog interrupts */
	emu_ge_int.value = 0;
	emu_ge_int.s.se_ge = 0xffff;
	emu_ge_int.s.ae_ge = 0xfffff;
	emu_wd_int.value = 0;
	emu_wd_int.s.se_wd = 1;

	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_WD_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_wd_int.value);
		offset = EMU_GE_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_ge_int.value);
	}
}

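/**
 * reset_pkt_input_ring - reset a packet input ring
 * @ndev: NITROX device
 * @ring: packet input ring number
 *
 * Disables the ring, waits for [ENB] to clear and then clears the
 * done counts.
 */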
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	union nps_pkt_in_done_cnts pkt_in_cnts;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: disable the ring, clear the enable bit */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* step 2: wait for [ENB] to clear */
	usleep_range(100, 150);
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
		if (!pkt_in_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: clear the done counts */
	offset = NPS_PKT_IN_DONE_CNTSX(ring);
	pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
	usleep_range(50, 100);
}

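/**
 * enable_pkt_input_ring - enable a packet input ring
 * @ndev: NITROX device
 * @ring: packet input ring number
 *
 * Selects 64-byte instructions, sets [ENB] and waits for the ring to
 * report itself enabled.
 */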
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* 64-byte instruction size */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.is64b = 1;
	pkt_in_ctl.s.enb = 1;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
		if (pkt_in_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);
}

/**
 * nitrox_config_pkt_input_rings - configure Packet Input Rings
 * @ndev: NITROX device
 */
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
		union nps_pkt_in_instr_rsize pkt_in_rsize;
		union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
		u64 offset;

		reset_pkt_input_ring(ndev, i);

		/*
		 * step 4:
		 * configure the 16-byte aligned ring base address,
		 * the ring size and the interrupt threshold.
		 */
		offset = NPS_PKT_IN_INSTR_BADDRX(i);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* configure ring size */
		offset = NPS_PKT_IN_INSTR_RSIZEX(i);
		pkt_in_rsize.value = 0;
		pkt_in_rsize.s.rsize = ndev->qlen;
		nitrox_write_csr(ndev, offset, pkt_in_rsize.value);

		/* set high threshold for pkt input ring interrupts */
		offset = NPS_PKT_IN_INT_LEVELSX(i);
		nitrox_write_csr(ndev, offset, 0xffffffff);

		/* step 5: clear the doorbell count */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		pkt_in_dbell.value = 0;
		pkt_in_dbell.s.dbell = 0xffffffff;
		nitrox_write_csr(ndev, offset, pkt_in_dbell.value);

		/* enable the ring */
		enable_pkt_input_ring(ndev, i);
	}
}

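/**
 * reset_pkt_solicit_port - reset a packet solicit port
 * @ndev: NITROX device
 * @port: packet solicit port number
 *
 * Disables the port, waits for [ENB] to clear and then clears the
 * solicit counters.
 */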
static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: disable slc port */
	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_slc_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* step 2: wait for [ENB] to clear */
	usleep_range(100, 150);
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
		if (!pkt_slc_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: clear slc counters */
	offset = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
	usleep_range(50, 100);
}

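/**
 * enable_pkt_solicit_port - enable a packet solicit port
 * @ndev: NITROX device
 * @port: packet solicit port number
 *
 * Enables the port with zero padding and response headers turned on,
 * then waits for [ENB] to be set.
 */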
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = 0;
	pkt_slc_ctl.s.enb = 1;
	/*
	 * 8 trailing 0x00 bytes will be added
	 * to the end of the outgoing packet.
	 */
	pkt_slc_ctl.s.z = 1;
	/* enable response header */
	pkt_slc_ctl.s.rh = 1;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
		if (pkt_slc_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);
}

static void config_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_int_levels pkt_slc_int;
	u64 offset;

	reset_pkt_solicit_port(ndev, port);

	/* step 4: configure interrupt levels */
	offset = NPS_PKT_SLC_INT_LEVELSX(port);
	pkt_slc_int.value = 0;
	/* time interrupt threshold */
	pkt_slc_int.s.timet = 0x3fffff;
	nitrox_write_csr(ndev, offset, pkt_slc_int.value);

	/* enable the solicit port */
	enable_pkt_solicit_port(ndev, port);
}

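/**
 * nitrox_config_pkt_solicit_ports - configure all packet solicit ports
 * @ndev: NITROX device
 */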
void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++)
		config_pkt_solicit_port(ndev, i);
}

/**
 * enable_nps_core_interrupts - enable NPS core interrupts
 * @ndev: NITROX device.
 *
 * This includes the NPS core error and timeout interrupts.
 */
static void enable_nps_core_interrupts(struct nitrox_device *ndev)
{
	union nps_core_int_ena_w1s core_int;

	/* NPS core interrupts */
	core_int.value = 0;
	core_int.s.host_wr_err = 1;
	core_int.s.host_wr_timeout = 1;
	core_int.s.exec_wr_timeout = 1;
	core_int.s.npco_dma_malform = 1;
	core_int.s.host_nps_wr_err = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
}

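/**
 * nitrox_config_nps_core_unit - configure NPS core unit
 * @ndev: NITROX device
 *
 * Programs the endian control, disables the ILK interface, selects PF
 * mode and enables the NPS core interrupts.
 */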
void nitrox_config_nps_core_unit(struct nitrox_device *ndev)
{
	union nps_core_gbl_vfcfg core_gbl_vfcfg;

	/* endian control information */
	nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);

	/* disable ILK interface */
	core_gbl_vfcfg.value = 0;
	core_gbl_vfcfg.s.ilk_disable = 1;
	core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);

	/* enable nps core interrupts */
	enable_nps_core_interrupts(ndev);
}

/**
 * enable_nps_pkt_interrupts - enable NPS packet interrupts
 * @ndev: NITROX device.
 *
 * This includes the NPS packet input ring and solicit port interrupts.
 */
static void enable_nps_pkt_interrupts(struct nitrox_device *ndev)
{
	/* NPS packet in ring interrupts */
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
	/* NPS packet slc port interrupts */
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0ULL));
}

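/**
 * nitrox_config_nps_pkt_unit - configure NPS packet unit
 * @ndev: NITROX device
 *
 * Configures the packet input rings and solicit ports and enables the
 * NPS packet interrupts.
 */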
void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev)
{
	/* config input and solicit ports */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* enable nps packet interrupts */
	enable_nps_pkt_interrupts(ndev);
}

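/**
 * reset_aqm_ring - reset an AQM command queue
 * @ndev: NITROX device
 * @ring: AQM ring number
 *
 * Disables the queue, waits for it to go idle and clears the commands
 * completed count.
 */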
static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
{
	union aqmq_en aqmq_en_reg;
	union aqmq_activity_stat activity_stat;
	union aqmq_cmp_cnt cmp_cnt;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: disable the queue */
	offset = AQMQ_ENX(ring);
	aqmq_en_reg.value = 0;
	aqmq_en_reg.queue_enable = 0;
	nitrox_write_csr(ndev, offset, aqmq_en_reg.value);

	/* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */
	usleep_range(100, 150);
	offset = AQMQ_ACTIVITY_STATX(ring);
	do {
		activity_stat.value = nitrox_read_csr(ndev, offset);
		if (!activity_stat.queue_active)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: clear commands completed count */
	offset = AQMQ_CMP_CNTX(ring);
	cmp_cnt.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, cmp_cnt.value);
	usleep_range(50, 100);
}

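/**
 * enable_aqm_ring - enable an AQM command queue
 * @ndev: NITROX device
 * @ring: AQM ring number
 */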
void enable_aqm_ring(struct nitrox_device *ndev, int ring)
{
	union aqmq_en aqmq_en_reg;
	u64 offset;

	offset = AQMQ_ENX(ring);
	aqmq_en_reg.value = 0;
	aqmq_en_reg.queue_enable = 1;
	nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
	usleep_range(50, 100);
}

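/**
 * nitrox_config_aqm_rings - configure AQM command queues
 * @ndev: NITROX device
 *
 * For each queue: reset it, clear its doorbell count, program the next
 * command address, ring base address, ring size and completion
 * threshold, then enable it.
 */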
void nitrox_config_aqm_rings(struct nitrox_device *ndev)
{
	int ring;

	for (ring = 0; ring < ndev->nr_queues; ring++) {
		struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
		union aqmq_drbl drbl;
		union aqmq_qsz qsize;
		union aqmq_cmp_thr cmp_thr;
		u64 offset;

		/* steps 1 - 3 */
		reset_aqm_ring(ndev, ring);

		/* step 4: clear doorbell count of ring */
		offset = AQMQ_DRBLX(ring);
		drbl.value = 0;
		drbl.dbell_count = 0xFFFFFFFF;
		nitrox_write_csr(ndev, offset, drbl.value);

		/* step 5: configure host ring details */

		/* set host address for next command of ring */
		offset = AQMQ_NXT_CMDX(ring);
		nitrox_write_csr(ndev, offset, 0ULL);

		/* set host address of ring base */
		offset = AQMQ_BADRX(ring);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* set ring size */
		offset = AQMQ_QSZX(ring);
		qsize.value = 0;
		qsize.host_queue_size = ndev->qlen;
		nitrox_write_csr(ndev, offset, qsize.value);

		/* set command completion threshold */
		offset = AQMQ_CMP_THRX(ring);
		cmp_thr.value = 0;
		cmp_thr.commands_completed_threshold = 1;
		nitrox_write_csr(ndev, offset, cmp_thr.value);

		/* step 6: enable the queue */
		enable_aqm_ring(ndev, ring);
	}
}

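/**
 * enable_aqm_interrupts - enable AQM interrupts
 * @ndev: NITROX device
 */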
static void enable_aqm_interrupts(struct nitrox_device *ndev)
{
	/* set all AQM interrupt enable bits (write-1-to-set registers) */
	nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL));
}

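/**
 * nitrox_config_aqm_unit - configure AQM unit
 * @ndev: NITROX device
 *
 * Configures the AQM command queues and enables the AQM interrupts.
 */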
void nitrox_config_aqm_unit(struct nitrox_device *ndev)
{
	/* config aqm command queues */
	nitrox_config_aqm_rings(ndev);

	/* enable aqm interrupts */
	enable_aqm_interrupts(ndev);
}

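/**
 * nitrox_config_pom_unit - configure POM unit
 * @ndev: NITROX device
 *
 * Enables the POM interrupts and the perf counters for the available
 * SE cores.
 */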
void nitrox_config_pom_unit(struct nitrox_device *ndev)
{
	union pom_int_ena_w1s pom_int;
	int i;

	/* enable pom interrupts */
	pom_int.value = 0;
	pom_int.s.illegal_dport = 1;
	nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);

	/* enable perf counters */
	for (i = 0; i < ndev->hw.se_cores; i++)
		nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
}

/**
 * nitrox_config_rand_unit - enable NITROX random number unit
 * @ndev: NITROX device
 */
void nitrox_config_rand_unit(struct nitrox_device *ndev)
{
	union efl_rnm_ctl_status efl_rnm_ctl;
	u64 offset;

	offset = EFL_RNM_CTL_STATUS;
	efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
	efl_rnm_ctl.s.ent_en = 1;
	efl_rnm_ctl.s.rng_en = 1;
	nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
}

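/**
 * nitrox_config_efl_unit - configure EFL unit
 * @ndev: NITROX device
 *
 * Enables the EFL core and VF error interrupts for each cluster.
 */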
void nitrox_config_efl_unit(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int_ena_w1s efl_core_int;
		u64 offset;

		/* EFL core interrupts */
		offset = EFL_CORE_INT_ENA_W1SX(i);
		efl_core_int.value = 0;
		efl_core_int.s.len_ovr = 1;
		efl_core_int.s.d_left = 1;
		efl_core_int.s.epci_decode_err = 1;
		nitrox_write_csr(ndev, offset, efl_core_int.value);

		offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
		offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
	}
}

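/**
 * nitrox_config_bmi_unit - configure BMI unit
 * @ndev: NITROX device
 *
 * Programs the BMI thresholds and enables the BMI interrupts.
 */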
void nitrox_config_bmi_unit(struct nitrox_device *ndev)
{
	union bmi_ctl bmi_ctl;
	union bmi_int_ena_w1s bmi_int_ena;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMI_CTL;
	bmi_ctl.value = nitrox_read_csr(ndev, offset);
	bmi_ctl.s.max_pkt_len = 0xff;
	bmi_ctl.s.nps_free_thrsh = 0xff;
	bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
	nitrox_write_csr(ndev, offset, bmi_ctl.value);

	/* enable interrupts */
	offset = BMI_INT_ENA_W1S;
	bmi_int_ena.value = 0;
	bmi_int_ena.s.max_len_err_nps = 1;
	bmi_int_ena.s.pkt_rcv_err_nps = 1;
	bmi_int_ena.s.fpf_undrrn = 1;
	nitrox_write_csr(ndev, offset, bmi_int_ena.value);
}

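/**
 * nitrox_config_bmo_unit - configure BMO unit
 * @ndev: NITROX device
 */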
void nitrox_config_bmo_unit(struct nitrox_device *ndev)
{
	union bmo_ctl2 bmo_ctl2;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMO_CTL2;
	bmo_ctl2.value = nitrox_read_csr(ndev, offset);
	bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
	nitrox_write_csr(ndev, offset, bmo_ctl2.value);
}

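/**
 * invalidate_lbc - invalidate the LBC
 * @ndev: NITROX device
 *
 * Starts a CAM invalidation and polls until the hardware reports it done.
 */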
void invalidate_lbc(struct nitrox_device *ndev)
{
	union lbc_inval_ctl lbc_ctl;
	union lbc_inval_status lbc_stat;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* start LBC invalidation */
	offset = LBC_INVAL_CTL;
	lbc_ctl.value = nitrox_read_csr(ndev, offset);
	lbc_ctl.s.cam_inval_start = 1;
	nitrox_write_csr(ndev, offset, lbc_ctl.value);

	/* wait for the invalidation to complete */
	offset = LBC_INVAL_STATUS;
	do {
		lbc_stat.value = nitrox_read_csr(ndev, offset);
		if (lbc_stat.s.done)
			break;
		udelay(50);
	} while (max_retries--);
}

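/**
 * nitrox_config_lbc_unit - configure LBC unit
 * @ndev: NITROX device
 *
 * Invalidates the LBC and enables the LBC, PLM and ELM interrupts.
 */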
void nitrox_config_lbc_unit(struct nitrox_device *ndev)
{
	union lbc_int_ena_w1s lbc_int_ena;
	u64 offset;

	invalidate_lbc(ndev);

	/* enable interrupts */
	offset = LBC_INT_ENA_W1S;
	lbc_int_ena.value = 0;
	lbc_int_ena.s.dma_rd_err = 1;
	lbc_int_ena.s.over_fetch_err = 1;
	lbc_int_ena.s.cam_inval_abort = 1;
	lbc_int_ena.s.cam_hard_err = 1;
	nitrox_write_csr(ndev, offset, lbc_int_ena.value);

	offset = LBC_PLM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_PLM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));

	offset = LBC_ELM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
}

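/**
 * config_nps_core_vfcfg_mode - set the PF/VF mode in NPS_CORE_GBL_VFCFG
 * @ndev: NITROX device
 * @mode: VF configuration mode
 */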
void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
{
	union nps_core_gbl_vfcfg vfcfg;

	vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
	vfcfg.s.cfg = mode & 0x7;

	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
}

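/*
 * Map the usable SE/AE core counts to the CNN55xx core option string
 * used in the part name.
 */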
static const char *get_core_option(u8 se_cores, u8 ae_cores)
{
	const char *option = "";

	if (ae_cores == AE_MAX_CORES) {
		switch (se_cores) {
		case SE_MAX_CORES:
			option = "60";
			break;
		case 40:
			option = "60s";
			break;
		}
	} else if (ae_cores == (AE_MAX_CORES / 2)) {
		option = "30";
	} else {
		option = "60i";
	}

	return option;
}

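/*
 * Map the zip core count and core frequency to the feature option
 * string used in the part name.
 */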
static const char *get_feature_option(u8 zip_cores, int core_freq)
{
	if (zip_cores == 0)
		return "";
	else if (zip_cores < ZIP_MAX_CORES)
		return "-C15";

	if (core_freq >= 850)
		return "-C45";
	else if (core_freq >= 750)
		return "-C35";
	else if (core_freq >= 550)
		return "-C25";

	return "";
}

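/**
 * nitrox_get_hwinfo - read hardware capabilities of the device
 * @ndev: NITROX device
 *
 * Reads the core clock frequency, counts the usable AE, SE and ZIP
 * cores from the fuse registers and builds the part name string.
 */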
void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
	union emu_fuse_map emu_fuse;
	union rst_boot rst_boot;
	union fus_dat1 fus_dat1;
	unsigned char name[IFNAMSIZ * 2] = {};
	int i, dead_cores;
	u64 offset;

	/* get core frequency */
	offset = RST_BOOT;
	rst_boot.value = nitrox_read_csr(ndev, offset);
	ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;

	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_FUSE_MAPX(i);
		emu_fuse.value = nitrox_read_csr(ndev, offset);
		if (emu_fuse.s.valid) {
			dead_cores = hweight32(emu_fuse.s.ae_fuse);
			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
			dead_cores = hweight16(emu_fuse.s.se_fuse);
			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
		}
	}
	/* find zip hardware availability */
	offset = FUS_DAT1;
	fus_dat1.value = nitrox_read_csr(ndev, offset);
	if (!fus_dat1.nozip) {
		dead_cores = hweight8(fus_dat1.zip_info);
		ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
	}

	/*
	 * determine the partname
	 * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
	 */
	snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
		 get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
		 ndev->hw.freq,
		 get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
		 ndev->hw.revision_id);

	/* copy partname */
	strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
}

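/**
 * enable_pf2vf_mbox_interrupts - enable the PF/VF mailbox interrupts
 * @ndev: NITROX device
 */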
void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
{
	u64 value = ~0ULL;
	u64 reg_addr;

	/* Mailbox interrupt low enable set register */
	reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S;
	nitrox_write_csr(ndev, reg_addr, value);

	/* Mailbox interrupt high enable set register */
	reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S;
	nitrox_write_csr(ndev, reg_addr, value);
}

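/**
 * disable_pf2vf_mbox_interrupts - disable the PF/VF mailbox interrupts
 * @ndev: NITROX device
 */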
void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
{
	u64 value = ~0ULL;
	u64 reg_addr;

	/* Mailbox interrupt low enable clear register */
	reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C;
	nitrox_write_csr(ndev, reg_addr, value);

	/* Mailbox interrupt high enable clear register */
	reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C;
	nitrox_write_csr(ndev, reg_addr, value);
}