xref: /linux/drivers/crypto/intel/qat/qat_common/adf_isr.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_MAX_NUM_VFS	32
static struct workqueue_struct *adf_misc_wq;

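/*
 * Note added for clarity: one MSI-X vector is used per ring bank plus one
 * extra vector for the AE cluster, which is why adf_enable_msix() below
 * requests exactly num_banks + 1 entries.
 */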
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = hw_data->num_banks + 1;
	int ret;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
				    msix_num_entries, PCI_IRQ_MSIX);
	if (unlikely(ret < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate %d MSI-X vectors\n",
			msix_num_entries);
		return ret;
	}
	return 0;
}

static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_free_irq_vectors(pci_dev_info->pci_dev);
}

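/*
 * Note added for clarity: the bundle (ring bank) top half writes 0 to the
 * bank's interrupt flag-and-coalescing CSR and defers response processing
 * to the bank's tasklet.
 */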
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}

#ifdef CONFIG_PCI_IOV
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 pending;

	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
	pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);

	return pending;
}

static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
{
	bool irq_handled = false;
	unsigned long vf_mask;

	/* Get the interrupt sources triggered by VFs, except for those already disabled */
	vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
	if (vf_mask) {
		struct adf_accel_vf_info *vf_info;
		int i;

		/*
		 * Handle VF2PF interrupt unless the VF is malicious and
		 * is attempting to flood the host OS with VF2PF interrupts.
		 */
		for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
			vf_info = accel_dev->pf.vf_info + i;

			if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
				dev_info(&GET_DEV(accel_dev),
					 "Too many ints from VF%d\n",
					 vf_info->vf_nr);
				continue;
			}

			adf_schedule_vf2pf_handler(vf_info);
			irq_handled = true;
		}
	}
	return irq_handled;
}
#endif /* CONFIG_PCI_IOV */

static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (hw_data->handle_pm_interrupt &&
	    hw_data->handle_pm_interrupt(accel_dev))
		return true;

	return false;
}

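/*
 * Note added for clarity: the AE cluster top half checks for VF2PF
 * interrupts (when SR-IOV is enabled) and then for power management
 * interrupts; anything else is reported as a spurious AE interrupt.
 */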
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
		return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */

	if (adf_handle_pm_int(accel_dev))
		return IRQ_HANDLED;

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}

static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int clust_irq = hw_data->num_banks;
	int irq, i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			if (irqs[i].enabled) {
				irq = pci_irq_vector(pci_dev_info->pci_dev, i);
				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, &etr_data->banks[i]);
			}
		}
	}

	if (irqs[i].enabled) {
		irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
		free_irq(irq, accel_dev);
	}
}

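/*
 * Note added for clarity: bundle IRQs are named "qat<id>-bundle<bank>" and
 * spread across online CPUs via affinity hints using
 * (accel_id * num_banks + bank) % num_online_cpus(); the remaining vector
 * is requested for the AE cluster ("qat<id>-ae-cluster").
 */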
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int clust_irq = hw_data->num_banks;
	int ret, irq, i = 0;
	char *name;

	/* Request an MSI-X IRQ for each bank unless SR-IOV is enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = irqs[i].name;
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			irq = pci_irq_vector(pci_dev_info->pci_dev, i);
			if (unlikely(irq < 0)) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to get IRQ number of device vector %d - %s\n",
					i, name);
				ret = irq;
				goto err;
			}
			ret = request_irq(irq, adf_msix_isr_bundle, 0,
					  &name[0], bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to allocate IRQ %d for %s\n",
					irq, name);
				goto err;
			}

			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(irq, get_cpu_mask(cpu));
			irqs[i].enabled = true;
		}
	}

	/* Request the MSI-X IRQ for the AE cluster */
	name = irqs[i].name;
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
	if (unlikely(irq < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to get IRQ number of device vector %d - %s\n",
			i, name);
		ret = irq;
		goto err;
	}
	ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate IRQ %d for %s\n", irq, name);
		goto err;
	}
	irqs[i].enabled = true;
	return ret;
err:
	adf_free_irqs(accel_dev);
	return ret;
}

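/*
 * Note added for clarity: the IRQ bookkeeping table always holds one entry
 * for the AE cluster; per-bank entries are added only when SR-IOV is
 * disabled, mirroring the request logic in adf_request_irqs().
 */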
static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;
	struct adf_irq *irqs;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
			    GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!irqs)
		return -ENOMEM;

	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
	return 0;
}

static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
	accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
}

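/*
 * Note added for clarity: each ring bank gets a response tasklet running
 * adf_response_handler(), the bottom half scheduled from
 * adf_msix_isr_bundle().
 */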
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}

/**
 * adf_isr_resource_free() - Free IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for the acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_vectors_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for the acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_vectors_data(accel_dev);
	if (ret)
		goto err_out;

	ret = adf_enable_msix(accel_dev);
	if (ret)
		goto err_free_msix_table;

	ret = adf_setup_bh(accel_dev);
	if (ret)
		goto err_disable_msix;

	ret = adf_request_irqs(accel_dev);
	if (ret)
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_disable_msix:
	adf_disable_msix(&accel_dev->accel_pci_dev);

err_free_msix_table:
	adf_isr_free_msix_vectors_data(accel_dev);

err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);

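/*
 * Usage sketch (illustration only, not part of this file; the calling
 * context and error label are assumed): a setup path would typically pair
 * the two exported helpers like this:
 *
 *	ret = adf_isr_resource_alloc(accel_dev);
 *	if (ret)
 *		goto err;
 *	...
 *	adf_isr_resource_free(accel_dev);
 */
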
/**
 * adf_init_misc_wq() - Init misc workqueue
 *
 * Function initializes the workqueue 'qat_misc_wq' for general purpose work.
 *
 * Return: 0 on success, error code otherwise.
 */
int __init adf_init_misc_wq(void)
{
	adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);

	return !adf_misc_wq ? -ENOMEM : 0;
}

void adf_exit_misc_wq(void)
{
	if (adf_misc_wq)
		destroy_workqueue(adf_misc_wq);

	adf_misc_wq = NULL;
}

bool adf_misc_wq_queue_work(struct work_struct *work)
{
	return queue_work(adf_misc_wq, work);
}

bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
				    unsigned long delay)
{
	return queue_delayed_work(adf_misc_wq, work, delay);
}
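
/*
 * Usage sketch (illustration only; 'my_work' and 'my_handler' are assumed
 * names, not taken from the driver): callers defer work to the misc
 * workqueue roughly like this:
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *	...
 *	adf_misc_wq_queue_work(&my_work);
 */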
389