xref: /freebsd/sys/dev/qat/qat_common/adf_isr.c (revision 3a3af6b2a160bea72509a9d5ef84e25906b0478a)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_dev_err.h"
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/smp.h>
#include <sys/malloc.h>
#include <dev/pci/pcivar.h>

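/*
 * Define a dedicated taskqueue ("taskqueue_qat_pf") backed by its own kernel
 * thread; presumably used by the PF driver to defer interrupt-related work
 * out of interrupt context.
 */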
TASKQUEUE_DEFINE_THREAD(qat_pf);

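/*
 * Enable MSI-X for the device: one vector per ring bank plus one for the AE
 * (accelerator engine) cluster when SR-IOV is disabled, or a single vector,
 * remapped onto the AE table entry, when SR-IOV is enabled.
 */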
static int
adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int msix_num_entries = 1;
	int count = 0;
	int error = 0;
	int num_vectors = 0;
	u_int *vectors;

	/* If SR-IOV is disabled, add an entry for each bank. */
	if (!accel_dev->u1.pf.vf_info) {
		msix_num_entries += hw_data->num_banks;
		num_vectors = 0;
		vectors = NULL;
	} else {
		/*
		 * With SR-IOV enabled the banks are serviced by the VFs, so
		 * the PF only needs the AE vector.  Build a remap table that
		 * assigns the single allocated message to MSI-X table entry
		 * num_banks (the AE slot) and leaves the bank entries unused.
		 */
		num_vectors = hw_data->num_banks + 1;
		vectors = malloc(num_vectors * sizeof(u_int),
				 M_QAT,
				 M_WAITOK | M_ZERO);
		vectors[hw_data->num_banks] = 1;
	}

	count = msix_num_entries;
	error = pci_alloc_msix(info_pci_dev->pci_dev, &count);
	if (error == 0 && count != msix_num_entries) {
		/* Fewer vectors were granted than requested; give them back. */
		pci_release_msi(info_pci_dev->pci_dev);
		error = EFBIG;
	}
	if (error) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable MSI-X IRQ(s)\n");
		free(vectors, M_QAT);
		return error;
	}

	if (vectors != NULL) {
		error =
		    pci_remap_msix(info_pci_dev->pci_dev, num_vectors, vectors);
		free(vectors, M_QAT);
		if (error) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to remap MSI-X IRQ(s)\n");
			pci_release_msi(info_pci_dev->pci_dev);
			return error;
		}
	}

	return 0;
}

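/* Release all MSI-X messages previously allocated for the device. */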
static void
adf_disable_msix(struct adf_accel_pci *info_pci_dev)
{
	pci_release_msi(info_pci_dev->pci_dev);
}

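/*
 * Interrupt handler for a ring bank (bundle): write 0 to the bank's
 * flag-and-coalescing interrupt CSR to silence it, then dispatch the
 * transport response handler for that bank.
 */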
static void
adf_msix_isr_bundle(void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_etr_data *priv_data = bank->accel_dev->transport;

	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
	adf_response_handler((uintptr_t)&priv_data->banks[bank->bank_number]);
}

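/*
 * Interrupt handler for the AE cluster vector.  RAS and slice-hang
 * conditions are handed to the hardware-specific callbacks; a fatal RAS
 * event triggers adf_notify_fatal_error().  The ERRSOU registers are then
 * read to log the error source, or a spurious interrupt if none is set.
 */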
static void
adf_msix_isr_ae(void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *pmisc_bar_addr = pmisc->virt_addr;
	u32 errsou3;
	u32 errsou5;
	bool reset_required = false;

	if (hw_data->ras_interrupts &&
	    hw_data->ras_interrupts(accel_dev, &reset_required)) {
		if (reset_required) {
			adf_notify_fatal_error(accel_dev);
			goto exit;
		}
	}

	/*
	 * The callback reports and handles any slice hang itself; its return
	 * value needs no further action here.
	 */
	if (hw_data->check_slice_hang)
		hw_data->check_slice_hang(accel_dev);

exit:
	errsou3 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3);
	errsou5 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5);
	if (errsou3 | errsou5)
		adf_print_err_registers(accel_dev);
	else
		device_printf(GET_DEV(accel_dev), "spurious AE interrupt\n");
}

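/*
 * Look up the configured core affinity for a bank's IRQ.  Falls back to the
 * first CPU in the system when no CoreAffinity entry is present in the
 * device configuration.
 */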
static int
adf_get_irq_affinity(struct adf_accel_dev *accel_dev, int bank)
{
	int core = CPU_FIRST();
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES];

	snprintf(bankName,
		 ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1,
		 ADF_ETRMGR_CORE_AFFINITY_FORMAT,
		 bank);
	bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1] = '\0';

	if (adf_cfg_get_param_value(accel_dev, "Accelerator0", bankName, val)) {
		device_printf(GET_DEV(accel_dev),
			      "No CoreAffinity set - using default core: %d\n",
			      core);
	} else if (compat_strtouint(val, 10, &core)) {
		device_printf(GET_DEV(accel_dev),
			      "Can't get cpu core ID\n");
	}
	return (core);
}

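/*
 * Allocate and set up the MSI-X interrupt handlers: one per ring bank
 * (bound to its configured core) unless SR-IOV is enabled, plus one for the
 * AE cluster.  Bank rids are 1..num_banks and the AE vector uses rid
 * num_banks + 1, matching the vector layout set up by adf_enable_msix().
 */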
static int
adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = info_pci_dev->msix_entries.entries;
	int ret = 0, rid = 0, i = 0;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int computed_core = 0;

	/* Request an MSI-X IRQ for every bank unless SR-IOV is enabled. */
	if (!accel_dev->u1.pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];

			/* MSI-X rids are 1-based: bank i uses rid i + 1. */
			rid = i + 1;
			msixe[i].irq =
			    bus_alloc_resource_any(info_pci_dev->pci_dev,
						   SYS_RES_IRQ,
						   &rid,
						   RF_ACTIVE);
			if (msixe[i].irq == NULL) {
				device_printf(
				    GET_DEV(accel_dev),
				    "failed to allocate IRQ for bundle %d\n",
				    i);
				return ENXIO;
			}

			ret = bus_setup_intr(info_pci_dev->pci_dev,
					     msixe[i].irq,
					     INTR_TYPE_MISC | INTR_MPSAFE,
					     NULL,
					     adf_msix_isr_bundle,
					     bank,
					     &msixe[i].cookie);
			if (ret) {
				device_printf(
				    GET_DEV(accel_dev),
				    "failed to enable IRQ for bundle %d\n",
				    i);
				bus_release_resource(info_pci_dev->pci_dev,
						     SYS_RES_IRQ,
						     rid,
						     msixe[i].irq);
				msixe[i].irq = NULL;
				return ret;
			}

			/* Bind the bank's IRQ to its configured core. */
			computed_core = adf_get_irq_affinity(accel_dev, i);
			bus_describe_intr(info_pci_dev->pci_dev,
					  msixe[i].irq,
					  msixe[i].cookie,
					  "b%d",
					  i);
			bus_bind_intr(info_pci_dev->pci_dev,
				      msixe[i].irq,
				      computed_core);
		}
	}

	/*
	 * Request the MSI-X IRQ for the AE cluster.  At this point i is
	 * num_banks (SR-IOV disabled) or 0 (SR-IOV enabled), so msixe[i] is
	 * the last allocated entry in either case.
	 */
	rid = hw_data->num_banks + 1;
	msixe[i].irq = bus_alloc_resource_any(info_pci_dev->pci_dev,
					      SYS_RES_IRQ,
					      &rid,
					      RF_ACTIVE);
	if (msixe[i].irq == NULL) {
		device_printf(GET_DEV(accel_dev),
			      "failed to allocate IRQ for ae-cluster\n");
		return ENXIO;
	}

	ret = bus_setup_intr(info_pci_dev->pci_dev,
			     msixe[i].irq,
			     INTR_TYPE_MISC | INTR_MPSAFE,
			     NULL,
			     adf_msix_isr_ae,
			     accel_dev,
			     &msixe[i].cookie);
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "failed to enable IRQ for ae-cluster\n");
		bus_release_resource(info_pci_dev->pci_dev,
				     SYS_RES_IRQ,
				     rid,
				     msixe[i].irq);
		msixe[i].irq = NULL;
		return ret;
	}

	bus_describe_intr(info_pci_dev->pci_dev,
			  msixe[i].irq,
			  msixe[i].cookie,
			  "ae");
	return ret;
}

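/*
 * Tear down and release every IRQ that adf_request_irqs() managed to set
 * up; entries that were never allocated have NULL irq/cookie and are
 * skipped.
 */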
static void
adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct msix_entry *msixe = info_pci_dev->msix_entries.entries;
	int i = 0;

	for (i = 0; i < info_pci_dev->msix_entries.num_entries; i++) {
		if (msixe[i].irq != NULL && msixe[i].cookie != NULL) {
			bus_teardown_intr(info_pci_dev->pci_dev,
					  msixe[i].irq,
					  msixe[i].cookie);
			bus_free_resource(info_pci_dev->pci_dev,
					  SYS_RES_IRQ,
					  msixe[i].irq);
		}
	}
}

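/*
 * Size and allocate the msix_entry table to match the vector layout used by
 * adf_enable_msix(): num_banks + 1 entries without SR-IOV, a single entry
 * (the AE vector) with it.
 */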
static int
adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *entries;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled (vf_info is NULL), add an entry per bank. */
	if (!accel_dev->u1.pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	entries = malloc(msix_num_entries * sizeof(struct msix_entry),
			 M_QAT,
			 M_WAITOK | M_ZERO);

	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	return 0;
}

static void
adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	free(accel_dev->accel_pci_dev.msix_entries.entries, M_QAT);
	accel_dev->accel_pci_dev.msix_entries.entries = NULL;
}

/**
 * adf_isr_resource_free() - Free IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees the interrupts for the acceleration device.
 */
void
adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}

/**
 * adf_isr_resource_alloc() - Allocate IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for the acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		return ret;
	if (adf_enable_msix(accel_dev))
		goto err_out;

	if (adf_request_irqs(accel_dev))
		goto err_out;

	return 0;
err_out:
	/* Undo any partial setup; specific errors are collapsed to EFAULT. */
	adf_isr_resource_free(accel_dev);
	return EFAULT;
}
346