/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_dev_err.h"
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/smp.h>
#include <sys/malloc.h>
#include <dev/pci/pcivar.h>

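/*
 * Taskqueue with a dedicated kernel thread (taskqueue_qat_pf), used to
 * defer PF work out of interrupt context.
 */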
TASKQUEUE_DEFINE_THREAD(qat_pf);

static int
adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int msix_num_entries = 1;
	int count = 0;
	int error = 0;
	int num_vectors = 0;
	u_int *vectors;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	/* If SR-IOV is disabled, add an entry for each bank */
	if (!accel_dev->u1.pf.vf_info) {
		msix_num_entries += hw_data->num_banks;
		num_vectors = 0;
		vectors = NULL;
	} else {
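		/*
		 * SR-IOV enabled: build a remap table with one slot per
		 * MSI-X table entry.  Bank slots stay 0 (unused); only the
		 * last slot (AE/misc) maps to an allocated vector.
		 */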
		num_vectors = hw_data->num_banks + 1;
		vectors = malloc(num_vectors * sizeof(u_int),
				 M_QAT,
				 M_WAITOK | M_ZERO);
		vectors[hw_data->num_banks] = 1;
	}

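	/*
	 * pci_alloc_msix() may grant fewer vectors than requested; treat a
	 * partial allocation as a failure and release whatever was granted.
	 */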
	count = msix_num_entries;
	error = pci_alloc_msix(info_pci_dev->pci_dev, &count);
	if (error == 0 && count != msix_num_entries) {
		pci_release_msi(info_pci_dev->pci_dev);
		error = EFBIG;
	}
	if (error) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable MSI-X IRQ(s)\n");
		free(vectors, M_QAT);
		return error;
	}

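	/*
	 * Apply the SR-IOV remap table so the AE/misc MSI-X table entry
	 * uses vector 1 and the per-bank entries are left unused.
	 */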
	if (vectors != NULL) {
		error =
		    pci_remap_msix(info_pci_dev->pci_dev, num_vectors, vectors);
		free(vectors, M_QAT);
		if (error) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to remap MSI-X IRQ(s)\n");
			pci_release_msi(info_pci_dev->pci_dev);
			return error;
		}
	}

	return 0;
}

static void
adf_disable_msix(struct adf_accel_pci *info_pci_dev)
{
	pci_release_msi(info_pci_dev->pci_dev);
}

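/*
 * Interrupt handler for ring-bank (bundle) MSI-X vectors: mask the bank's
 * interrupt flag/coalescing register, then process pending responses.
 */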
static void
adf_msix_isr_bundle(void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_etr_data *priv_data = bank->accel_dev->transport;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
					    bank->bank_number,
					    0);
	adf_response_handler((uintptr_t)&priv_data->banks[bank->bank_number]);
}

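/*
 * Interrupt handler for the AE-cluster MSI-X vector: handle RAS and
 * slice-hang conditions, then decode the error-source registers.
 */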
static void
adf_msix_isr_ae(void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *pmisc_bar_addr = pmisc->virt_addr;
	u32 errsou3;
	u32 errsou5;
	bool reset_required = false;

	if (hw_data->ras_interrupts &&
	    hw_data->ras_interrupts(accel_dev, &reset_required)) {
		if (reset_required) {
			adf_notify_fatal_error(accel_dev);
			goto exit;
		}
	}

	/* The slice-hang check needs no follow-up here; its result is ignored. */
	if (hw_data->check_slice_hang)
		(void)hw_data->check_slice_hang(accel_dev);

exit:
	errsou3 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3);
	errsou5 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5);
	if (errsou3 | errsou5)
		adf_print_err_registers(accel_dev);
	else
		device_printf(GET_DEV(accel_dev), "spurious AE interrupt\n");
}

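/*
 * Look up the configured core affinity for a bank (per-bank CoreAffinity
 * key in the Accelerator0 section); fall back to the first CPU if unset.
 */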
static int
adf_get_irq_affinity(struct adf_accel_dev *accel_dev, int bank)
{
	int core = CPU_FIRST();
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES];

	snprintf(bankName,
		 ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1,
		 ADF_ETRMGR_CORE_AFFINITY_FORMAT,
		 bank);
	bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1] = '\0';

	if (adf_cfg_get_param_value(accel_dev, "Accelerator0", bankName, val)) {
		device_printf(GET_DEV(accel_dev),
			      "No CoreAffinity Set - using default core: %d\n",
			      core);
	} else {
		if (compat_strtouint(val, 10, &core)) {
			device_printf(GET_DEV(accel_dev),
				      "Can't get cpu core ID\n");
		}
	}
	return (core);
}

static int
adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = info_pci_dev->msix_entries.entries;
	int ret = 0, rid = 0, i = 0;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int computed_core = 0;

	/* Request an MSI-X IRQ for each bank unless SR-IOV is enabled */
	if (!accel_dev->u1.pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];

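			/* MSI-X SYS_RES_IRQ rids are 1-based: bank i uses rid i + 1 */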
			rid = i + 1;
			msixe[i].irq =
			    bus_alloc_resource_any(info_pci_dev->pci_dev,
						   SYS_RES_IRQ,
						   &rid,
						   RF_ACTIVE);
			if (msixe[i].irq == NULL) {
				device_printf(
				    GET_DEV(accel_dev),
				    "failed to allocate IRQ for bundle %d\n",
				    i);
				return ENXIO;
			}

			ret = bus_setup_intr(info_pci_dev->pci_dev,
					     msixe[i].irq,
					     INTR_TYPE_MISC | INTR_MPSAFE,
					     NULL,
					     adf_msix_isr_bundle,
					     bank,
					     &msixe[i].cookie);
			if (ret) {
				device_printf(
				    GET_DEV(accel_dev),
				    "failed to enable IRQ for bundle %d\n",
				    i);
				bus_release_resource(info_pci_dev->pci_dev,
						     SYS_RES_IRQ,
						     rid,
						     msixe[i].irq);
				msixe[i].irq = NULL;
				return ret;
			}

			computed_core = adf_get_irq_affinity(accel_dev, i);
			bus_describe_intr(info_pci_dev->pci_dev,
					  msixe[i].irq,
					  msixe[i].cookie,
					  "b%d",
					  i);
			bus_bind_intr(info_pci_dev->pci_dev,
				      msixe[i].irq,
				      computed_core);
		}
	}

	/* Request the MSI-X IRQ for the AE cluster (last entry, rid = num_banks + 1) */
	rid = hw_data->num_banks + 1;
	msixe[i].irq = bus_alloc_resource_any(info_pci_dev->pci_dev,
					      SYS_RES_IRQ,
					      &rid,
					      RF_ACTIVE);
	if (msixe[i].irq == NULL) {
		device_printf(GET_DEV(accel_dev),
			      "failed to allocate IRQ for ae-cluster\n");
		return ENXIO;
	}

	ret = bus_setup_intr(info_pci_dev->pci_dev,
			     msixe[i].irq,
			     INTR_TYPE_MISC | INTR_MPSAFE,
			     NULL,
			     adf_msix_isr_ae,
			     accel_dev,
			     &msixe[i].cookie);
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "failed to enable IRQ for ae-cluster\n");
		bus_release_resource(info_pci_dev->pci_dev,
				     SYS_RES_IRQ,
				     rid,
				     msixe[i].irq);
		msixe[i].irq = NULL;
		return ret;
	}

	bus_describe_intr(info_pci_dev->pci_dev,
			  msixe[i].irq,
			  msixe[i].cookie,
			  "ae");
	return ret;
}

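/* Tear down handlers and release IRQ resources for all MSI-X entries. */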
static void
adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct msix_entry *msixe = info_pci_dev->msix_entries.entries;
	int i = 0;

	if (info_pci_dev->msix_entries.num_entries > 0) {
		for (i = 0; i < info_pci_dev->msix_entries.num_entries; i++) {
			if (msixe[i].irq != NULL && msixe[i].cookie != NULL) {
				bus_teardown_intr(info_pci_dev->pci_dev,
						  msixe[i].irq,
						  msixe[i].cookie);
				bus_free_resource(info_pci_dev->pci_dev,
						  SYS_RES_IRQ,
						  msixe[i].irq);
			}
		}
	}
}

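/*
 * One MSI-X entry is always needed for the AE cluster; without SR-IOV,
 * one more entry is added per ring bank.
 */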
static int
adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *entries;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->u1.pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	entries = malloc(msix_num_entries * sizeof(struct msix_entry),
			 M_QAT,
			 M_WAITOK | M_ZERO);

	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	return 0;
}

static void
adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	free(accel_dev->accel_pci_dev.msix_entries.entries, M_QAT);
	accel_dev->accel_pci_dev.msix_entries.entries = NULL;
}

/**
 * adf_isr_resource_free() - Free IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void
adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}
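
/*
 * Typical pairing of adf_isr_resource_free() above with
 * adf_isr_resource_alloc() below, as a sketch only (the real call sites
 * live in the common init/shutdown paths, e.g. behind the
 * hw_data->alloc_irq and hw_data->free_irq hooks):
 *
 *	if (adf_isr_resource_alloc(accel_dev))
 *		return EFAULT;
 *	...
 *	adf_isr_resource_free(accel_dev);
 */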

/**
 * adf_isr_resource_alloc() - Allocate IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		return ret;
	if (adf_enable_msix(accel_dev))
		goto err_out;

	if (adf_request_irqs(accel_dev))
		goto err_out;

	return 0;
err_out:
	adf_isr_resource_free(accel_dev);
	return EFAULT;
}
351