xref: /linux/drivers/crypto/cavium/nitrox/nitrox_isr.c (revision 1a2ac6d7ecdcde74a4e16f31de64124160fc7237)
// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_mbx.h"

/*
 * One vector for each type of ring
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
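/*
 * entry 192 is the first entry past the per-ring groups
 * (64 rings * NR_RING_VECTORS = 192); it carries NPS_CORE_INT_ACTIVE
 */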

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: argument
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	union nps_pkt_slc_cnts slc_cnts;
	struct nitrox_cmdq *cmdq = qvec->cmdq;

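	/* the completion count CSR also carries the slc_int status bit */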
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* New packet on SLC output port */
	if (slc_cnts.s.slc_int)
		tasklet_hi_schedule(&qvec->resp_tasklet);

	return IRQ_HANDLED;
}

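/*
 * The clear_*_err_intr() helpers below share the write-1-to-clear (W1C)
 * pattern: read the error CSR to fetch the pending bits, write the same
 * value back to acknowledge them, and log it rate-limited.
 */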
static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	/* Write 1 to clear */
	value = nitrox_read_csr(ndev, NPS_CORE_INT);
	nitrox_write_csr(ndev, NPS_CORE_INT, value);

	dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT  0x%016llx\n", value);
}

static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
			    pkt_int.value);

	if (pkt_int.s.slc_err) {
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* re-enable the solicit ports that reported a ring error */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* re-enable the input rings that reported an error */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
	}
}

static void clear_pom_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, POM_INT);
	nitrox_write_csr(ndev, POM_INT, value);
	dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
}

static void clear_pem_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, PEM0_INT);
	nitrox_write_csr(ndev, PEM0_INT, value);
	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
}

static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

	if (lbc_int.s.dma_rd_err) {
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}
	/* acknowledge LBC_INT itself last, after the sub-CSRs are cleared */
	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

static void clear_efl_err_intr(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int core_int;
		u64 value, offset;

		offset = EFL_CORE_INTX(i);
		core_int.value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, core_int.value);
		dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT  0x%016llx\n",
				    i, core_int.value);
		if (core_int.s.se_err) {
			offset = EFL_CORE_SE_ERR_INTX(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}
}

static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, BMI_INT);
	nitrox_write_csr(ndev, BMI_INT, value);
	dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
}

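/*
 * nps_core_int_tasklet - deferred error-recovery work for the non-ring
 * vector (registered below in nitrox_register_interrupts()); note that
 * nothing in this file schedules it, and both branches are placeholders.
 */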
static void nps_core_int_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_device *ndev = qvec->ndev;

	/* if PF mode, do queue recovery */
	if (ndev->mode == __NDEV_MODE_PF) {
	} else {
		/*
		 * if VF(s) are enabled, communicate the error
		 * information to the VF(s)
		 */
	}
}

/**
 * nps_core_int_isr - interrupt handler for NITROX errors and
 *   mailbox communication
 * @irq: irq number
 * @data: argument
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	struct nitrox_device *ndev = qvec->ndev;
	union nps_core_int_active core_int;

	core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

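	/* clear each asserted interrupt source, block by block */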
	if (core_int.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int.s.bmi)
		clear_bmi_err_intr(ndev);

	/* Mailbox interrupt */
	if (core_int.s.mbox)
		nitrox_pf2vf_mbox_handler(ndev);

	/* set the resend bit so the ISR is called back if more work is pending */
	core_int.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

	return IRQ_HANDLED;
}

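/*
 * Teardown note: the IRQ is freed before the response tasklet is
 * disabled and killed, so the handler can no longer schedule the
 * tasklet while it is being torn down.
 */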
void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_free_irq_vectors(pdev);
}

int nitrox_register_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int nr_vecs, vec, cpu;
	int ret, i;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * Entry 4: AQMQ ring 1
	 * Entry 5: ZQM ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_vecs = pci_msix_vec_count(pdev);
	if (nr_vecs < 0) {
		dev_err(DEV(ndev), "failed to get MSI-X vector count, err %d\n",
			nr_vecs);
		return nr_vecs;
	}

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to allocate %d MSI-X vectors\n",
			nr_vecs);
		return ret;
	}
	ndev->num_vecs = nr_vecs;

	ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
	if (!ndev->qvec) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	/* request irqs for packet rings/ports (the NPS PKT entry of each
	 * ring vector group)
	 */
	for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
		qvec = &ndev->qvec[i];

		qvec->ring = i / NR_RING_VECTORS;
		if (qvec->ring >= ndev->nr_queues)
			break;

		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
		snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
		if (ret) {
			dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
				qvec->ring);
			goto irq_fail;
		}
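		/* spread ring interrupts across the online CPUs round-robin */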
		cpu = qvec->ring % num_online_cpus();
		irq_set_affinity_hint(vec, get_cpu_mask(cpu));

		tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
			     (unsigned long)qvec);
		qvec->valid = true;
	}

	/* request irq for the non-ring vector */
	i = NON_RING_MSIX_BASE;
	qvec = &ndev->qvec[i];
	qvec->ndev = ndev;

	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
	/* get the vector number */
	vec = pci_irq_vector(pdev, i);
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
		goto irq_fail;
	}
	/* hint the last online CPU; CPU ids run 0..num_online_cpus() - 1 */
	cpu = num_online_cpus() - 1;
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

irq_fail:
	nitrox_unregister_interrupts(ndev);
	return ret;
}

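/*
 * In SR-IOV mode only the single non-ring vector (entry 192) is in use,
 * so ndev->num_vecs is NR_NON_RING_VECTORS and the loop below runs once.
 */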
void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		vec = ndev->iov.msix.vector;
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_disable_msix(pdev);
}

int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int vec, cpu;
	int ret;

	/*
	 * Only the non-ring vector, i.e. entry 192, is available
	 * to the PF in SR-IOV mode.
	 */
	ndev->iov.msix.entry = NON_RING_MSIX_BASE;
	ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
	if (ret) {
		dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
			NON_RING_MSIX_BASE);
		return ret;
	}

	qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
	if (!qvec) {
		pci_disable_msix(pdev);
		return -ENOMEM;
	}
	qvec->ndev = ndev;

	ndev->qvec = qvec;
	ndev->num_vecs = NR_NON_RING_VECTORS;
	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
		 NON_RING_MSIX_BASE);

	vec = ndev->iov.msix.vector;
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
			NON_RING_MSIX_BASE);
		goto iov_irq_fail;
	}
	/* hint the last online CPU; CPU ids run 0..num_online_cpus() - 1 */
	cpu = num_online_cpus() - 1;
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

iov_irq_fail:
	nitrox_sriov_unregister_interrupts(ndev);
	return ret;
}