/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include <sys/kernel.h>
#include <sys/systm.h>

#include <sys/types.h>
#include <sys/interrupt.h>
#include <dev/pci/pcivar.h>
#include <sys/param.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_pfvf_utils.h"

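/*
 * Dedicated taskqueue threads: taskqueue_qat_vf runs the PF2VF message
 * bottom half, taskqueue_qat_bank_handler runs the per-bank response
 * handlers.
 */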
static TASKQUEUE_DEFINE_THREAD(qat_vf);
static TASKQUEUE_DEFINE_THREAD(qat_bank_handler);

static struct workqueue_struct *adf_vf_stop_wq;
static DEFINE_MUTEX(vf_stop_wq_lock);

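/* Context for a deferred device stop queued on adf_vf_stop_wq. */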
struct adf_vf_stop_data {
	struct adf_accel_dev *accel_dev;
	struct work_struct work;
};

static int
adf_enable_msi(struct adf_accel_dev *accel_dev)
{
	int stat;
	int count = 1;

	stat = pci_alloc_msi(accel_to_pci_dev(accel_dev), &count);
	if (stat)
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable MSI interrupts\n");

	return stat;
}

static void
adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);

	pci_release_msi(pdev);
}

static void
adf_dev_stop_async(struct work_struct *work)
{
	struct adf_vf_stop_data *stop_data =
	    container_of(work, struct adf_vf_stop_data, work);
	struct adf_accel_dev *accel_dev = stop_data->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	/* Re-enable PF2VF interrupts */
	hw_data->enable_pf2vf_interrupt(accel_dev);
	kfree(stop_data);
}

int
adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_vf_stop_data *stop_data;

	clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
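	/* GFP_ATOMIC: this handler is invoked from the PF2VF bottom half. */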
	stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
	if (!stop_data) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't schedule stop for vf_%d\n",
			      accel_dev->accel_id);
		return -ENOMEM;
	}
	stop_data->accel_dev = accel_dev;
	INIT_WORK(&stop_data->work, adf_dev_stop_async);
	queue_work(adf_vf_stop_wq, &stop_data->work);

	return 0;
}

int
adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev,
			     struct pfvf_message msg)
{
	accel_dev->u1.vf.rpreset_sts = msg.data;
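	/* Any status other than the known response codes is reported as a timeout. */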
	if (accel_dev->u1.vf.rpreset_sts == RPRESET_SUCCESS)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(success) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_NOT_SUPPORTED)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(not supported) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_INVAL_BANK)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(invalid bank) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(timeout) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);

	complete(&accel_dev->u1.vf.msg_received);

	return 0;
}

static void
adf_pf2vf_bh_handler(void *data, int pending)
{
	struct adf_accel_dev *accel_dev = data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (adf_recv_and_handle_pf2vf_msg(accel_dev))
		/* Re-enable PF2VF interrupts */
		hw_data->enable_pf2vf_interrupt(accel_dev);
}

static int
adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
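	/* A single task services PF2VF messages; vf2pf_lock serializes VF-to-PF sends. */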
	TASK_INIT(&accel_dev->u1.vf.pf2vf_bh_tasklet,
		  0,
		  adf_pf2vf_bh_handler,
		  accel_dev);
	mutex_init(&accel_dev->u1.vf.vf2pf_lock);

	return 0;
}

static void
adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
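	/* Cancel any queued task, then drain in case one is already running. */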
	taskqueue_cancel(taskqueue_qat_vf,
			 &accel_dev->u1.vf.pf2vf_bh_tasklet,
			 NULL);
	taskqueue_drain(taskqueue_qat_vf, &accel_dev->u1.vf.pf2vf_bh_tasklet);
	mutex_destroy(&accel_dev->u1.vf.vf2pf_lock);
}

static void
adf_bh_handler(void *data, int pending)
{
	struct adf_etr_bank_data *bank = data;

	adf_response_handler((uintptr_t)bank);
}

static int
adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	int i = 0;
	struct adf_etr_data *priv_data = accel_dev->transport;

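	/* One response-handler task per ring bank, all serviced by the qat_bank_handler taskqueue. */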
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		TASK_INIT(&priv_data->banks[i].resp_handler,
			  0,
			  adf_bh_handler,
			  &priv_data->banks[i]);
	}

	return 0;
}

static void
adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	int i = 0;
	struct adf_etr_data *transport;

	if (!accel_dev || !accel_dev->transport)
		return;

	transport = accel_dev->transport;
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		taskqueue_cancel(taskqueue_qat_bank_handler,
				 &transport->banks[i].resp_handler,
				 NULL);
		taskqueue_drain(taskqueue_qat_bank_handler,
				&transport->banks[i].resp_handler);
	}
}

static void
adf_isr(void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
	int int_active_bundles = 0;
	int i = 0;

	/* Check for PF2VF interrupt */
	if (hw_data->interrupt_active_pf2vf(accel_dev)) {
		/* Disable PF to VF interrupt */
		hw_data->disable_pf2vf_interrupt(accel_dev);
		/* Schedule tasklet to handle interrupt BH */
		taskqueue_enqueue(taskqueue_qat_vf,
				  &accel_dev->u1.vf.pf2vf_bh_tasklet);
	}

	if (hw_data->get_int_active_bundles)
		int_active_bundles = hw_data->get_int_active_bundles(accel_dev);

	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		if (int_active_bundles & BIT(i)) {
			struct adf_etr_data *etr_data = accel_dev->transport;
			struct adf_etr_bank_data *bank = &etr_data->banks[i];

			/* Disable Flag and Coalesce Ring Interrupts */
			csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
							    bank->bank_number,
							    0);
			/* Schedule tasklet to handle interrupt BH */
			taskqueue_enqueue(taskqueue_qat_bank_handler,
					  &bank->resp_handler);
		}
	}
}

static int
adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);
	int ret;
	int rid = 1;
	int cpu;

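	/* MSI vectors are exposed as SYS_RES_IRQ resources starting at rid 1. */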
	accel_dev->u1.vf.irq =
	    bus_alloc_resource_any(pdev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (accel_dev->u1.vf.irq == NULL) {
		device_printf(GET_DEV(accel_dev), "failed to allocate IRQ\n");
		return ENXIO;
	}
	ret = bus_setup_intr(pdev,
			     accel_dev->u1.vf.irq,
			     INTR_TYPE_MISC | INTR_MPSAFE,
			     NULL,
			     adf_isr,
			     accel_dev,
			     &accel_dev->u1.vf.cookie);
	if (ret) {
		device_printf(GET_DEV(accel_dev), "failed to enable IRQ\n");
		goto errout;
	}

	cpu = accel_dev->accel_id % num_online_cpus();
	ret = bus_bind_intr(pdev, accel_dev->u1.vf.irq, cpu);
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "failed to bind IRQ handler to cpu core\n");
		goto errout;
	}
	accel_dev->u1.vf.irq_enabled = true;

	return ret;
errout:
	bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);

	return ret;
}

/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device virtual function.
 */
void
adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);

	if (accel_dev->u1.vf.irq_enabled) {
		bus_teardown_intr(pdev,
				  accel_dev->u1.vf.irq,
				  accel_dev->u1.vf.cookie);
		bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);
	}
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}

/**
 * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	if (adf_enable_msi(accel_dev))
		goto err_out;

	if (adf_setup_pf2vf_bh(accel_dev))
		goto err_disable_msi;

	if (adf_setup_bh(accel_dev))
		goto err_cleanup_pf2vf_bh;

	if (adf_request_msi_irq(accel_dev))
		goto err_cleanup_bh;

	return 0;

	/* Unwind in reverse order of setup. */
err_cleanup_bh:
	adf_cleanup_bh(accel_dev);
err_cleanup_pf2vf_bh:
	adf_cleanup_pf2vf_bh(accel_dev);
err_disable_msi:
	adf_disable_msi(accel_dev);
err_out:
	return -EFAULT;
}

/**
 * adf_flush_vf_wq() - Flush workqueue for VF
 * @accel_dev: Pointer to acceleration device.
 *
 * Function disables the PF2VF interrupts on the VF so that no new messages
 * are received, then flushes the workqueue 'adf_vf_stop_wq'.
 *
 * Return: void.
 */
void
adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	hw_data->disable_pf2vf_interrupt(accel_dev);

	if (adf_vf_stop_wq)
		flush_workqueue(adf_vf_stop_wq);
}

/**
 * adf_init_vf_wq() - Init workqueue for VF
 *
 * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_vf_wq(void)
{
	int ret = 0;

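	/* Serialize creation so all VF instances share a single stop workqueue. */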
	mutex_lock(&vf_stop_wq_lock);
	if (!adf_vf_stop_wq)
		adf_vf_stop_wq =
		    alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);

	if (!adf_vf_stop_wq)
		ret = ENOMEM;

	mutex_unlock(&vf_stop_wq_lock);
	return ret;
}

void
adf_exit_vf_wq(void)
{
	if (adf_vf_stop_wq)
		destroy_workqueue(adf_vf_stop_wq);

	adf_vf_stop_wq = NULL;
}