xref: /freebsd/sys/dev/qat/qat_common/adf_vf_isr.c (revision 9f23cbd6cae82fd77edfad7173432fa8dccd0a95)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/interrupt.h>
#include <dev/pci/pcivar.h>
#include <sys/param.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_pfvf_utils.h"

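/*
 * Two dedicated taskqueue threads provide the bottom-half contexts used in
 * this file: taskqueue_qat_vf runs the PF2VF message handler and
 * taskqueue_qat_bank_handler runs the per-bank response handlers.
 */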
static TASKQUEUE_DEFINE_THREAD(qat_vf);
static TASKQUEUE_DEFINE_THREAD(qat_bank_handler);

static struct workqueue_struct *adf_vf_stop_wq;
static DEFINE_MUTEX(vf_stop_wq_lock);

struct adf_vf_stop_data {
	struct adf_accel_dev *accel_dev;
	struct work_struct work;
};

static int
adf_enable_msi(struct adf_accel_dev *accel_dev)
{
	int stat;
	int count = 1;
	stat = pci_alloc_msi(accel_to_pci_dev(accel_dev), &count);
	if (stat) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable MSI interrupts\n");
		return stat;
	}

	return stat;
}

static void
adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);
	pci_release_msi(pdev);
}

static void
adf_dev_stop_async(struct work_struct *work)
{
	struct adf_vf_stop_data *stop_data =
	    container_of(work, struct adf_vf_stop_data, work);
	struct adf_accel_dev *accel_dev = stop_data->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	/* Re-enable PF2VF interrupts */
	hw_data->enable_pf2vf_interrupt(accel_dev);
	kfree(stop_data);
}

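/*
 * A PF "restarting" notification cannot be handled entirely in the PF2VF
 * bottom half, since stopping and shutting down the device may sleep. The
 * handler below therefore clears ADF_STATUS_PF_RUNNING and defers the
 * actual stop to adf_dev_stop_async() on the adf_vf_stop_wq workqueue.
 */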
int
adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_vf_stop_data *stop_data;

	clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
	if (!stop_data) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't schedule stop for vf_%d\n",
			      accel_dev->accel_id);
		return -ENOMEM;
	}
	stop_data->accel_dev = accel_dev;
	INIT_WORK(&stop_data->work, adf_dev_stop_async);
	queue_work(adf_vf_stop_wq, &stop_data->work);

	return 0;
}

int
adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev,
			     struct pfvf_message msg)
{
	accel_dev->u1.vf.rpreset_sts = msg.data;
	if (accel_dev->u1.vf.rpreset_sts == RPRESET_SUCCESS)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(success) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_NOT_SUPPORTED)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(not supported) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_INVAL_BANK)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(invalid bank) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(timeout) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);

	complete(&accel_dev->u1.vf.msg_received);

	return 0;
}

static void
adf_pf2vf_bh_handler(void *data, int pending)
{
	struct adf_accel_dev *accel_dev = data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (adf_recv_and_handle_pf2vf_msg(accel_dev))
		/* Re-enable PF2VF interrupts */
		hw_data->enable_pf2vf_interrupt(accel_dev);

	return;
}

static int
adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	TASK_INIT(&accel_dev->u1.vf.pf2vf_bh_tasklet,
		  0,
		  adf_pf2vf_bh_handler,
		  accel_dev);
	mutex_init(&accel_dev->u1.vf.vf2pf_lock);

	return 0;
}

static void
adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	taskqueue_cancel(taskqueue_qat_vf,
			 &accel_dev->u1.vf.pf2vf_bh_tasklet,
			 NULL);
	taskqueue_drain(taskqueue_qat_vf, &accel_dev->u1.vf.pf2vf_bh_tasklet);
	mutex_destroy(&accel_dev->u1.vf.vf2pf_lock);
}

static void
adf_bh_handler(void *data, int pending)
{
	struct adf_etr_bank_data *bank = (void *)data;

	adf_response_handler((uintptr_t)bank);

	return;
}

static int
adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	int i = 0;
	struct adf_etr_data *priv_data = accel_dev->transport;

	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		TASK_INIT(&priv_data->banks[i].resp_handler,
			  0,
			  adf_bh_handler,
			  &priv_data->banks[i]);
	}

	return 0;
}

static void
adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	int i = 0;
	struct adf_etr_data *transport;

	if (!accel_dev || !accel_dev->transport)
		return;

	transport = accel_dev->transport;
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		taskqueue_cancel(taskqueue_qat_bank_handler,
				 &transport->banks[i].resp_handler,
				 NULL);
		taskqueue_drain(taskqueue_qat_bank_handler,
				&transport->banks[i].resp_handler);
	}
}

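/*
 * Interrupt handler for the VF's single MSI vector. It only defers work:
 * a pending PF2VF message has its interrupt source disabled and is handed
 * to taskqueue_qat_vf, and every bank with an active interrupt has its
 * flag and coalesce interrupts disabled before its response handler is
 * queued on taskqueue_qat_bank_handler.
 */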
static void
adf_isr(void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
	int int_active_bundles = 0;
	int i = 0;

	/* Check for PF2VF interrupt */
	if (hw_data->interrupt_active_pf2vf(accel_dev)) {
		/* Disable PF to VF interrupt */
		hw_data->disable_pf2vf_interrupt(accel_dev);
		/* Schedule tasklet to handle interrupt BH */
		taskqueue_enqueue(taskqueue_qat_vf,
				  &accel_dev->u1.vf.pf2vf_bh_tasklet);
	}

	if (hw_data->get_int_active_bundles)
		int_active_bundles = hw_data->get_int_active_bundles(accel_dev);

	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		if (int_active_bundles & BIT(i)) {
			struct adf_etr_data *etr_data = accel_dev->transport;
			struct adf_etr_bank_data *bank = &etr_data->banks[i];

			/* Disable Flag and Coalesce Ring Interrupts */
			csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
							    bank->bank_number,
							    0);
			/* Schedule tasklet to handle interrupt BH */
			taskqueue_enqueue(taskqueue_qat_bank_handler,
					  &bank->resp_handler);
		}
	}
}

static int
adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);
	int ret;
	int rid = 1;
	int cpu;

	accel_dev->u1.vf.irq =
	    bus_alloc_resource_any(pdev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (accel_dev->u1.vf.irq == NULL) {
		device_printf(GET_DEV(accel_dev), "failed to allocate IRQ\n");
		return ENXIO;
	}
	ret = bus_setup_intr(pdev,
			     accel_dev->u1.vf.irq,
			     INTR_TYPE_MISC | INTR_MPSAFE,
			     NULL,
			     adf_isr,
			     accel_dev,
			     &accel_dev->u1.vf.cookie);
	if (ret) {
		device_printf(GET_DEV(accel_dev), "failed to enable irq\n");
		goto errout;
	}

	cpu = accel_dev->accel_id % num_online_cpus();
	ret = bus_bind_intr(pdev, accel_dev->u1.vf.irq, cpu);
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "failed to bind IRQ handler to cpu core\n");
		goto errout;
	}
	accel_dev->u1.vf.irq_enabled = true;

	return ret;
errout:
	bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);

	return ret;
}

/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees the interrupts for the acceleration device virtual function.
 */
void
adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);

	if (accel_dev->u1.vf.irq_enabled) {
		bus_teardown_intr(pdev,
				  accel_dev->u1.vf.irq,
				  accel_dev->u1.vf.cookie);
		bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);
	}
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}

/**
 * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates the interrupts for the acceleration device virtual
 * function.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	if (adf_enable_msi(accel_dev))
		goto err_out;

	if (adf_setup_pf2vf_bh(accel_dev))
		goto err_disable_msi;

	if (adf_setup_bh(accel_dev))
		goto err_out;

	if (adf_request_msi_irq(accel_dev))
		goto err_disable_msi;

	return 0;

err_disable_msi:
	adf_disable_msi(accel_dev);

err_out:
	return -EFAULT;
}
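
/*
 * Usage sketch (illustrative, not taken from this driver's init path): a VF
 * device bring-up is expected to pair this allocation with
 * adf_vf_isr_resource_free() on teardown, e.g.
 *
 *	if (adf_vf_isr_resource_alloc(accel_dev))
 *		return EFAULT;
 *	...
 *	adf_vf_isr_resource_free(accel_dev);
 */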

/**
 * adf_flush_vf_wq() - Flush workqueue for VF
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function disables the PF2VF interrupts on the VF so that no new messages
 * are received, then flushes the workqueue 'adf_vf_stop_wq'.
 *
 * Return: void.
 */
void
adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	hw_data->disable_pf2vf_interrupt(accel_dev);

	if (adf_vf_stop_wq)
		flush_workqueue(adf_vf_stop_wq);
}

/**
 * adf_init_vf_wq() - Init workqueue for VF
 *
 * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_vf_wq(void)
{
	int ret = 0;

	mutex_lock(&vf_stop_wq_lock);
	if (!adf_vf_stop_wq)
		adf_vf_stop_wq =
		    alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);

	if (!adf_vf_stop_wq)
		ret = ENOMEM;

	mutex_unlock(&vf_stop_wq_lock);
	return ret;
}

void
adf_exit_vf_wq(void)
{
	if (adf_vf_stop_wq)
		destroy_workqueue(adf_vf_stop_wq);

	adf_vf_stop_wq = NULL;
}
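
/*
 * Lifecycle sketch (illustrative; assumes the usual module init/exit and
 * device shutdown callers, which live outside this file):
 *
 *	if (adf_init_vf_wq())		// once, at module load
 *		return ENOMEM;
 *	...
 *	adf_flush_vf_wq(accel_dev);	// per device, before teardown
 *	...
 *	adf_exit_vf_wq();		// once, at module unload
 */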
392