xref: /linux/drivers/crypto/cavium/nitrox/nitrox_isr.c (revision a6f37cee6e4f6fa9d61962efbcb06a032efed1ba)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/pci.h>
3 #include <linux/printk.h>
4 #include <linux/slab.h>
5 
6 #include "nitrox_dev.h"
7 #include "nitrox_csr.h"
8 #include "nitrox_common.h"
9 #include "nitrox_hal.h"
10 
11 #define NR_RING_VECTORS 3
12 #define NPS_CORE_INT_ACTIVE_ENTRY 192
13 
14 /**
15  * nps_pkt_slc_isr - IRQ handler for NPS solicit port
16  * @irq: irq number
17  * @data: argument
18  */
19 static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
20 {
21 	struct bh_data *slc = data;
22 	union nps_pkt_slc_cnts pkt_slc_cnts;
23 
24 	pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
25 	/* New packet on SLC output port */
26 	if (pkt_slc_cnts.s.slc_int)
27 		tasklet_hi_schedule(&slc->resp_handler);
28 
29 	return IRQ_HANDLED;
30 }
31 
32 static void clear_nps_core_err_intr(struct nitrox_device *ndev)
33 {
34 	u64 value;
35 
36 	/* Write 1 to clear */
37 	value = nitrox_read_csr(ndev, NPS_CORE_INT);
38 	nitrox_write_csr(ndev, NPS_CORE_INT, value);
39 
40 	dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT  0x%016llx\n", value);
41 }
42 
/**
 * clear_nps_pkt_err_intr - acknowledge NPS packet interface error interrupts
 * @ndev: NITROX device
 *
 * Reads the summary NPS_PKT_INT register, and for each reported error class
 * clears the detailed status registers (write-1-to-clear) and re-enables the
 * solicit ports / input rings identified by the error bitmaps.
 */
static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
			    pkt_int.value);

	if (pkt_int.s.slc_err) {
		/* solicit port error: clear the error-type status first */
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

		/* each set bit names a port that raised a response error */
		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the solicit ports */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		/* input ring error: clear the error-type status first */
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
		/* each set bit names an input ring that raised an error */
		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the input ring */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
	}
}
100 
101 static void clear_pom_err_intr(struct nitrox_device *ndev)
102 {
103 	u64 value;
104 
105 	value = nitrox_read_csr(ndev, POM_INT);
106 	nitrox_write_csr(ndev, POM_INT, value);
107 	dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
108 }
109 
110 static void clear_pem_err_intr(struct nitrox_device *ndev)
111 {
112 	u64 value;
113 
114 	value = nitrox_read_csr(ndev, PEM0_INT);
115 	nitrox_write_csr(ndev, PEM0_INT, value);
116 	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
117 }
118 
/**
 * clear_lbc_err_intr - acknowledge LBC (load balancer/cache) error interrupts
 * @ndev: NITROX device
 *
 * Reads LBC_INT, clears the per-cause detail registers (write-1-to-clear)
 * for each reported error, then acknowledges the summary register last so
 * no cause is lost between the detail and summary writes.
 */
static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

	if (lbc_int.s.dma_rd_err) {
		/* clear per-cluster VF error status for both banks */
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		/* CAM state is suspect; flush the local buffer cache */
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		/* clear prefetch length-mismatch status for VF ranges 1-64
		 * and 65-128
		 */
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		/* clear read length-mismatch status for both VF ranges */
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}
	/* acknowledge the summary register last */
	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}
163 
164 static void clear_efl_err_intr(struct nitrox_device *ndev)
165 {
166 	int i;
167 
168 	for (i = 0; i < NR_CLUSTERS; i++) {
169 		union efl_core_int core_int;
170 		u64 value, offset;
171 
172 		offset = EFL_CORE_INTX(i);
173 		core_int.value = nitrox_read_csr(ndev, offset);
174 		nitrox_write_csr(ndev, offset, core_int.value);
175 		dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT  0x%016llx\n",
176 				    i, core_int.value);
177 		if (core_int.s.se_err) {
178 			offset = EFL_CORE_SE_ERR_INTX(i);
179 			value = nitrox_read_csr(ndev, offset);
180 			nitrox_write_csr(ndev, offset, value);
181 		}
182 	}
183 }
184 
185 static void clear_bmi_err_intr(struct nitrox_device *ndev)
186 {
187 	u64 value;
188 
189 	value = nitrox_read_csr(ndev, BMI_INT);
190 	nitrox_write_csr(ndev, BMI_INT, value);
191 	dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
192 }
193 
194 /**
195  * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
196  * @ndev: NITROX device
197  */
198 static void clear_nps_core_int_active(struct nitrox_device *ndev)
199 {
200 	union nps_core_int_active core_int_active;
201 
202 	core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
203 
204 	if (core_int_active.s.nps_core)
205 		clear_nps_core_err_intr(ndev);
206 
207 	if (core_int_active.s.nps_pkt)
208 		clear_nps_pkt_err_intr(ndev);
209 
210 	if (core_int_active.s.pom)
211 		clear_pom_err_intr(ndev);
212 
213 	if (core_int_active.s.pem)
214 		clear_pem_err_intr(ndev);
215 
216 	if (core_int_active.s.lbc)
217 		clear_lbc_err_intr(ndev);
218 
219 	if (core_int_active.s.efl)
220 		clear_efl_err_intr(ndev);
221 
222 	if (core_int_active.s.bmi)
223 		clear_bmi_err_intr(ndev);
224 
225 	/* If more work callback the ISR, set resend */
226 	core_int_active.s.resend = 1;
227 	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
228 }
229 
230 static irqreturn_t nps_core_int_isr(int irq, void *data)
231 {
232 	struct nitrox_device *ndev = data;
233 
234 	clear_nps_core_int_active(ndev);
235 
236 	return IRQ_HANDLED;
237 }
238 
239 static int nitrox_enable_msix(struct nitrox_device *ndev)
240 {
241 	struct msix_entry *entries;
242 	char **names;
243 	int i, nr_entries, ret;
244 
245 	/*
246 	 * PF MSI-X vectors
247 	 *
248 	 * Entry 0: NPS PKT ring 0
249 	 * Entry 1: AQMQ ring 0
250 	 * Entry 2: ZQM ring 0
251 	 * Entry 3: NPS PKT ring 1
252 	 * Entry 4: AQMQ ring 1
253 	 * Entry 5: ZQM ring 1
254 	 * ....
255 	 * Entry 192: NPS_CORE_INT_ACTIVE
256 	 */
257 	nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
258 	entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
259 			       GFP_KERNEL, ndev->node);
260 	if (!entries)
261 		return -ENOMEM;
262 
263 	names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
264 	if (!names) {
265 		kfree(entries);
266 		return -ENOMEM;
267 	}
268 
269 	/* fill entires */
270 	for (i = 0; i < (nr_entries - 1); i++)
271 		entries[i].entry = i;
272 
273 	entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
274 
275 	for (i = 0; i < nr_entries; i++) {
276 		*(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
277 		if (!(*(names + i))) {
278 			ret = -ENOMEM;
279 			goto msix_fail;
280 		}
281 	}
282 	ndev->msix.entries = entries;
283 	ndev->msix.names = names;
284 	ndev->msix.nr_entries = nr_entries;
285 
286 	ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
287 				    ndev->msix.nr_entries);
288 	if (ret) {
289 		dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
290 			ret);
291 		goto msix_fail;
292 	}
293 	return 0;
294 
295 msix_fail:
296 	for (i = 0; i < nr_entries; i++)
297 		kfree(*(names + i));
298 
299 	kfree(entries);
300 	kfree(names);
301 	return ret;
302 }
303 
304 static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
305 {
306 	int i;
307 
308 	if (!ndev->bh.slc)
309 		return;
310 
311 	for (i = 0; i < ndev->nr_queues; i++) {
312 		struct bh_data *bh = &ndev->bh.slc[i];
313 
314 		tasklet_disable(&bh->resp_handler);
315 		tasklet_kill(&bh->resp_handler);
316 	}
317 	kfree(ndev->bh.slc);
318 	ndev->bh.slc = NULL;
319 }
320 
321 static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
322 {
323 	u32 size;
324 	int i;
325 
326 	size = ndev->nr_queues * sizeof(struct bh_data);
327 	ndev->bh.slc = kzalloc(size, GFP_KERNEL);
328 	if (!ndev->bh.slc)
329 		return -ENOMEM;
330 
331 	for (i = 0; i < ndev->nr_queues; i++) {
332 		struct bh_data *bh = &ndev->bh.slc[i];
333 		u64 offset;
334 
335 		offset = NPS_PKT_SLC_CNTSX(i);
336 		/* pre calculate completion count address */
337 		bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
338 		bh->cmdq = &ndev->pkt_cmdqs[i];
339 
340 		tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
341 			     (unsigned long)bh);
342 	}
343 
344 	return 0;
345 }
346 
/**
 * nitrox_request_irqs - request the PF MSI-X interrupt handlers
 * @ndev: NITROX device
 *
 * Requests an IRQ for each NPS packet ring vector (every NR_RING_VECTORS-th
 * entry) and for the final NPS_CORE_INT_ACTIVE vector. Each successfully
 * requested vector is recorded in ndev->msix.irqs so it can be freed later.
 *
 * NOTE(review): on failure, already-requested IRQs are NOT released here;
 * the caller is expected to unwind via nitrox_disable_msix() — confirm.
 *
 * Return: 0 on success, the request_irq() error code on failure.
 */
static int nitrox_request_irqs(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct msix_entry *msix_ent = ndev->msix.entries;
	int nr_ring_vectors, i = 0, ring, cpu, ret;
	char *name;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;

	/* request irq for pkt ring/ports only */
	while (i < nr_ring_vectors) {
		name = *(ndev->msix.names + i);
		ring = (i / NR_RING_VECTORS);
		snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
			 ndev->idx, ring);

		ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
				  name, &ndev->bh.slc[ring]);
		if (ret) {
			dev_err(&pdev->dev, "failed to get irq %d for %s\n",
				msix_ent[i].vector, name);
			return ret;
		}
		/* spread ring interrupts across the online CPUs */
		cpu = ring % num_online_cpus();
		irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));

		/* mark vector as requested, so teardown knows to free it */
		set_bit(i, ndev->msix.irqs);
		i += NR_RING_VECTORS;
	}

	/* Request IRQ for NPS_CORE_INT_ACTIVE */
	name = *(ndev->msix.names + i);
	snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
	ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to get irq %d for %s\n",
			msix_ent[i].vector, name);
		return ret;
	}
	set_bit(i, ndev->msix.irqs);

	return 0;
}
400 
401 static void nitrox_disable_msix(struct nitrox_device *ndev)
402 {
403 	struct msix_entry *msix_ent = ndev->msix.entries;
404 	char **names = ndev->msix.names;
405 	int i = 0, ring, nr_ring_vectors;
406 
407 	nr_ring_vectors = ndev->msix.nr_entries - 1;
408 
409 	/* clear pkt ring irqs */
410 	while (i < nr_ring_vectors) {
411 		if (test_and_clear_bit(i, ndev->msix.irqs)) {
412 			ring = (i / NR_RING_VECTORS);
413 			irq_set_affinity_hint(msix_ent[i].vector, NULL);
414 			free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
415 		}
416 		i += NR_RING_VECTORS;
417 	}
418 	irq_set_affinity_hint(msix_ent[i].vector, NULL);
419 	free_irq(msix_ent[i].vector, ndev);
420 	clear_bit(i, ndev->msix.irqs);
421 
422 	kfree(ndev->msix.entries);
423 	for (i = 0; i < ndev->msix.nr_entries; i++)
424 		kfree(*(names + i));
425 
426 	kfree(names);
427 	pci_disable_msix(ndev->pdev);
428 }
429 
/**
 * nitrox_pf_cleanup_isr - Cleanup PF MSI-X and IRQ
 * @ndev: NITROX device
 *
 * Reverses nitrox_pf_init_isr(): frees the IRQs and MSI-X vectors first,
 * then tears down the per-queue response tasklets.
 */
void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
{
	nitrox_disable_msix(ndev);
	nitrox_cleanup_pkt_slc_bh(ndev);
}
439 
/**
 * nitrox_pf_init_isr - Initialize PF MSI-X vectors and IRQ
 * @ndev: NITROX device
 *
 * Sets up the per-queue bottom halves, enables MSI-X and requests the
 * interrupt handlers; unwinds in reverse order on failure.
 *
 * Return: 0 on success, a negative value on failure.
 */
int nitrox_pf_init_isr(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_setup_pkt_slc_bh(ndev);
	if (err)
		return err;

	err = nitrox_enable_msix(ndev);
	if (err)
		goto msix_fail;

	err = nitrox_request_irqs(ndev);
	if (err)
		goto irq_fail;

	return 0;

irq_fail:
	nitrox_disable_msix(ndev);
msix_fail:
	nitrox_cleanup_pkt_slc_bh(ndev);
	return err;
}
470