xref: /freebsd/sys/dev/qat/qat_common/adf_vf_isr.c (revision ded037e65e5239671b1292ec987a2e0894b217b5)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include <sys/kernel.h>
#include <sys/systm.h>

#include <sys/types.h>
#include <sys/interrupt.h>
#include <dev/pci/pcivar.h>
#include <sys/param.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_pfvf_utils.h"
#include "adf_pfvf_vf_msg.h"

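/*
 * Deferred-work contexts used by this file: taskqueue_qat_vf runs the PF2VF
 * message bottom half, and taskqueue_qat_bank_handler runs the per-bank
 * response handlers; both are scheduled from adf_isr() below.
 */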
static TASKQUEUE_DEFINE_THREAD(qat_vf);
static TASKQUEUE_DEFINE_THREAD(qat_bank_handler);

static struct workqueue_struct *adf_vf_stop_wq;
static DEFINE_MUTEX(vf_stop_wq_lock);

struct adf_vf_stop_data {
	struct adf_accel_dev *accel_dev;
	struct work_struct work;
};

static int
adf_enable_msi(struct adf_accel_dev *accel_dev)
{
	int stat;
	int count = 1;

	stat = pci_alloc_msi(accel_to_pci_dev(accel_dev), &count);
	if (stat)
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable MSI interrupts\n");

	return stat;
}

static void
adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);

	pci_release_msi(pdev);
}

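/*
 * Deferred handler for a PF "restarting" notification.  Stopping and shutting
 * down the device may sleep, so the work is pushed to the adf_vf_stop_wq
 * workqueue instead of being done from the PF2VF tasklet.
 */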
static void
adf_dev_stop_async(struct work_struct *work)
{
	struct adf_vf_stop_data *stop_data =
	    container_of(work, struct adf_vf_stop_data, work);
	struct adf_accel_dev *accel_dev = stop_data->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	/* Re-enable PF2VF interrupts */
	hw_data->enable_pf2vf_interrupt(accel_dev);
	adf_vf2pf_restarting_complete(accel_dev);
	kfree(stop_data);
}

int
adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_vf_stop_data *stop_data;

	clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
	if (!stop_data) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't schedule stop for vf_%d\n",
			      accel_dev->accel_id);
		return -ENOMEM;
	}
	stop_data->accel_dev = accel_dev;
	INIT_WORK(&stop_data->work, adf_dev_stop_async);
	queue_work(adf_vf_stop_wq, &stop_data->work);

	return 0;
}

int
adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev,
			     struct pfvf_message msg)
{
	accel_dev->u1.vf.rpreset_sts = msg.data;
	if (accel_dev->u1.vf.rpreset_sts == RPRESET_SUCCESS)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(success) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_NOT_SUPPORTED)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(not supported) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_INVAL_BANK)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(invalid bank) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(timeout) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);

	complete(&accel_dev->u1.vf.msg_received);

	return 0;
}

int
adf_pf2vf_handle_pf_error(struct adf_accel_dev *accel_dev)
{
	device_printf(GET_DEV(accel_dev), "Fatal error received from PF\n");

	if (adf_notify_fatal_error(accel_dev))
		device_printf(GET_DEV(accel_dev), "Couldn't notify fatal error\n");

	return 0;
}

static void
adf_pf2vf_bh_handler(void *data, int pending)
{
	struct adf_accel_dev *accel_dev = data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (adf_recv_and_handle_pf2vf_msg(accel_dev))
		/* Re-enable PF2VF interrupts */
		hw_data->enable_pf2vf_interrupt(accel_dev);

	return;
}

static int
adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	TASK_INIT(&accel_dev->u1.vf.pf2vf_bh_tasklet,
		  0,
		  adf_pf2vf_bh_handler,
		  accel_dev);
	mutex_init(&accel_dev->u1.vf.vf2pf_lock);

	return 0;
}

static void
adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	taskqueue_cancel(taskqueue_qat_vf,
			 &accel_dev->u1.vf.pf2vf_bh_tasklet,
			 NULL);
	taskqueue_drain(taskqueue_qat_vf, &accel_dev->u1.vf.pf2vf_bh_tasklet);
	mutex_destroy(&accel_dev->u1.vf.vf2pf_lock);
}

static void
adf_bh_handler(void *data, int pending)
{
	struct adf_etr_bank_data *bank = (void *)data;

	adf_response_handler((uintptr_t)bank);

	return;
}

static int
adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	int i = 0;
	struct adf_etr_data *priv_data = accel_dev->transport;

	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		TASK_INIT(&priv_data->banks[i].resp_handler,
			  0,
			  adf_bh_handler,
			  &priv_data->banks[i]);
	}

	return 0;
}

static void
adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	int i = 0;
	struct adf_etr_data *transport;

	if (!accel_dev || !accel_dev->transport)
		return;

	transport = accel_dev->transport;
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		taskqueue_cancel(taskqueue_qat_bank_handler,
				 &transport->banks[i].resp_handler,
				 NULL);
		taskqueue_drain(taskqueue_qat_bank_handler,
				&transport->banks[i].resp_handler);
	}
}

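/*
 * Top-half MSI handler.  It only masks the active interrupt sources and
 * defers the real work: PF2VF messages are handed to the pf2vf_bh_tasklet on
 * taskqueue_qat_vf, and ring-bank responses to the per-bank resp_handler
 * tasks on taskqueue_qat_bank_handler.  The bottom halves re-enable the
 * corresponding interrupts once they have finished (see
 * adf_pf2vf_bh_handler() above and the transport response handler).
 */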
static void
adf_isr(void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
	int int_active_bundles = 0;
	int i = 0;

	/* Check for PF2VF interrupt */
	if (hw_data->interrupt_active_pf2vf(accel_dev)) {
		/* Disable PF to VF interrupt */
		hw_data->disable_pf2vf_interrupt(accel_dev);
		/* Schedule tasklet to handle interrupt BH */
		taskqueue_enqueue(taskqueue_qat_vf,
				  &accel_dev->u1.vf.pf2vf_bh_tasklet);
	}

	if (hw_data->get_int_active_bundles)
		int_active_bundles = hw_data->get_int_active_bundles(accel_dev);

	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
		if (int_active_bundles & BIT(i)) {
			struct adf_etr_data *etr_data = accel_dev->transport;
			struct adf_etr_bank_data *bank = &etr_data->banks[i];

			/* Disable Flag and Coalesce Ring Interrupts */
			csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
							    bank->bank_number,
							    0);
			/* Schedule tasklet to handle interrupt BH */
			taskqueue_enqueue(taskqueue_qat_bank_handler,
					  &bank->resp_handler);
		}
	}
}

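/*
 * Allocate and wire up the single MSI vector.  With one message allocated by
 * pci_alloc_msi(), the vector is exposed as SYS_RES_IRQ rid 1 (rid 0 is the
 * legacy INTx line), which is why rid is hard-coded below.
 */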
static int
adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);
	int ret;
	int rid = 1;
	int cpu;

	accel_dev->u1.vf.irq =
	    bus_alloc_resource_any(pdev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (accel_dev->u1.vf.irq == NULL) {
		device_printf(GET_DEV(accel_dev), "failed to allocate IRQ\n");
		return ENXIO;
	}
	ret = bus_setup_intr(pdev,
			     accel_dev->u1.vf.irq,
			     INTR_TYPE_MISC | INTR_MPSAFE,
			     NULL,
			     adf_isr,
			     accel_dev,
			     &accel_dev->u1.vf.cookie);
	if (ret) {
		device_printf(GET_DEV(accel_dev), "failed to enable irq\n");
		goto errout;
	}

	cpu = accel_dev->accel_id % num_online_cpus();
	ret = bus_bind_intr(pdev, accel_dev->u1.vf.irq, cpu);
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "failed to bind IRQ handler to cpu core\n");
		goto errout;
	}
	accel_dev->u1.vf.irq_enabled = true;

	return ret;
errout:
	bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);

	return ret;
}

/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device virtual function.
 */
void
adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_to_pci_dev(accel_dev);

	if (accel_dev->u1.vf.irq_enabled) {
		bus_teardown_intr(pdev,
				  accel_dev->u1.vf.irq,
				  accel_dev->u1.vf.cookie);
		bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);
	}
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}

/**
 * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	if (adf_enable_msi(accel_dev))
		goto err_out;

	if (adf_setup_pf2vf_bh(accel_dev))
		goto err_disable_msi;

	if (adf_setup_bh(accel_dev))
		goto err_cleanup_pf2vf_bh;

	if (adf_request_msi_irq(accel_dev))
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);
err_cleanup_pf2vf_bh:
	adf_cleanup_pf2vf_bh(accel_dev);
err_disable_msi:
	adf_disable_msi(accel_dev);
err_out:
	return -EFAULT;
}

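/*
 * Illustrative only: a rough sketch of how the exported helpers in this file
 * pair up over a VF's lifetime.  The real call sites live in the generic VF
 * init/shutdown paths elsewhere in the driver; "fail" is a hypothetical
 * error label.
 *
 *	if (adf_init_vf_wq())				once, at module load
 *		return ENOMEM;
 *	if (adf_vf_isr_resource_alloc(accel_dev))	per device, at init
 *		goto fail;
 *	...
 *	adf_flush_vf_wq(accel_dev);			per device, at shutdown
 *	adf_vf_isr_resource_free(accel_dev);
 *	adf_exit_vf_wq();				once, at module unload
 */
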
/**
 * adf_flush_vf_wq() - Flush workqueue for VF
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function disables the PF/VF interrupts on the VF so that no new messages
 * are received and flushes the workqueue 'adf_vf_stop_wq'.
 *
 * Return: void.
 */
void
adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	hw_data->disable_pf2vf_interrupt(accel_dev);

	if (adf_vf_stop_wq)
		flush_workqueue(adf_vf_stop_wq);
}

/**
 * adf_init_vf_wq() - Init workqueue for VF
 *
 * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_vf_wq(void)
{
	int ret = 0;

	mutex_lock(&vf_stop_wq_lock);
	if (!adf_vf_stop_wq)
		adf_vf_stop_wq =
		    alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);

	if (!adf_vf_stop_wq)
		ret = ENOMEM;

	mutex_unlock(&vf_stop_wq_lock);
	return ret;
}

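/*
 * Destroy the workqueue created by adf_init_vf_wq().  Safe to call even if
 * the workqueue was never allocated.
 */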
void
adf_exit_vf_wq(void)
{
	if (adf_vf_stop_wq)
		destroy_workqueue(adf_vf_stop_wq);

	adf_vf_stop_wq = NULL;
}