xref: /linux/drivers/infiniband/hw/hfi1/msix.c (revision 02680c23d7b3febe45ea3d4f9818c2b2dc89020a)
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "netdev.h"

/**
 * msix_initialize() - Calculate, request and configure MSIx IRQs
 * @dd: valid hfi1 devdata
 *
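 * Return: 0 on success, negative errno on failure.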
 */
int msix_initialize(struct hfi1_devdata *dd)
{
	u32 total;
	int ret;
	struct hfi1_msix_entry *entries;

	/*
	 * MSIx interrupt count:
	 *	one for the general, "slow path" interrupt
	 *	one per used SDMA engine
	 *	one per kernel receive context
	 *	one per netdev (VNIC) context
	 *	...any new IRQs should be added here.
	 */
	total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;

	if (total >= CCE_NUM_MSIX_VECTORS)
		return -EINVAL;

	ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
	if (ret < 0) {
		dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
		return ret;
	}

	entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
			  GFP_KERNEL);
	if (!entries) {
		pci_free_irq_vectors(dd->pcidev);
		return -ENOMEM;
	}

	dd->msix_info.msix_entries = entries;
	spin_lock_init(&dd->msix_info.msix_lock);
	bitmap_zero(dd->msix_info.in_use_msix, total);
	dd->msix_info.max_requested = total;
	dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);

	return 0;
}

/**
 * msix_request_irq() - Allocate a free MSIx IRQ
 * @dd: valid devdata
 * @arg: context information for the IRQ
 * @handler: IRQ handler
 * @thread: IRQ thread handler (could be NULL)
 * @type: affinity IRQ type
 * @name: IRQ name
 *
 * Allocate an MSIx vector if available, and then create the appropriate
 * metadata needed to keep track of the PCI IRQ request.
 *
 * Return:
 *   < 0   Error
 *   >= 0  MSIx vector
 *
 */
static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
			    irq_handler_t handler, irq_handler_t thread,
			    enum irq_type type, const char *name)
{
	unsigned long nr;
	int irq;
	int ret;
	struct hfi1_msix_entry *me;

	/* Allocate an MSIx vector */
	spin_lock(&dd->msix_info.msix_lock);
	nr = find_first_zero_bit(dd->msix_info.in_use_msix,
				 dd->msix_info.max_requested);
	if (nr < dd->msix_info.max_requested)
		__set_bit(nr, dd->msix_info.in_use_msix);
	spin_unlock(&dd->msix_info.msix_lock);

	if (nr == dd->msix_info.max_requested)
		return -ENOSPC;

	if (type < IRQ_SDMA || type >= IRQ_OTHER)
		return -EINVAL;

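	/* Map the MSIx table slot to its Linux IRQ number and request it */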
	irq = pci_irq_vector(dd->pcidev, nr);
	ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
	if (ret) {
		dd_dev_err(dd,
			   "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
			   name, irq, nr, ret);
		spin_lock(&dd->msix_info.msix_lock);
		__clear_bit(nr, dd->msix_info.in_use_msix);
		spin_unlock(&dd->msix_info.msix_lock);
		return ret;
	}

	/*
	 * Assign arg only after the pci_request_irq() call succeeds, so
	 * that msix_free_irq() (which keys off me->arg) cleans up only
	 * vectors whose IRQ was actually requested.
	 */
	me = &dd->msix_info.msix_entries[nr];
	me->irq = irq;
	me->arg = arg;
	me->type = type;

	/* This is a request, so a failure is not fatal */
	ret = hfi1_get_irq_affinity(dd, me);
	if (ret)
		dd_dev_err(dd, "%s: unable to pin IRQ %d, err %d\n",
			   name, irq, ret);

	return nr;
}

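/*
 * Common helper for kernel and netdev receive contexts: request an MSIx
 * vector for the context, record the interrupt source register and mask
 * it maps to, and remap the RCVAVAIL interrupt source to the allocated
 * vector.
 */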
static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
				       irq_handler_t handler,
				       irq_handler_t thread,
				       const char *name)
{
	int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
				  rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
				  name);
	if (nr < 0)
		return nr;

	/*
	 * Set the interrupt register and mask for this
	 * context's interrupt.
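	 * Interrupt sources are grouped 64 per register: ireg selects
	 * the register and imask the bit within it.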
	 */
	rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
	rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
	rcd->msix_intr = nr;
	remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);

	return 0;
}

/**
 * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
 * @rcd: valid rcd context
 *
 */
int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
{
	char name[MAX_NAME_SIZE];

	snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
		 rcd->dd->unit, rcd->ctxt);

	return msix_request_rcd_irq_common(rcd, receive_context_interrupt,
					   receive_context_thread, name);
}

/**
 * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs
 * for netdev context
 * @rcd: valid netdev context
 */
int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
{
	char name[MAX_NAME_SIZE];

	snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
		 rcd->dd->unit, rcd->ctxt);
	return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
					   NULL, name);
}

/**
 * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
 * @sde: valid sdma engine
 *
 */
int msix_request_sdma_irq(struct sdma_engine *sde)
{
	int nr;
	char name[MAX_NAME_SIZE];

	snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
		 sde->dd->unit, sde->this_idx);
	nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
			      IRQ_SDMA, name);
	if (nr < 0)
		return nr;
	sde->msix_intr = nr;
	remap_sdma_interrupts(sde->dd, sde->this_idx, nr);

	return 0;
}

/**
 * msix_request_general_irq - Helper for getting general IRQ
 * resources
 * @dd: valid device data
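 *
 * Return: 0 on success, -EINVAL if the general interrupt did not land
 * on MSIx vector 0, or another negative errno on failure.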
 */
int msix_request_general_irq(struct hfi1_devdata *dd)
{
	int nr;
	char name[MAX_NAME_SIZE];

	snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
	nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
			      name);
	if (nr < 0)
		return nr;

	/* general interrupt must be MSIx vector 0 */
	if (nr) {
		msix_free_irq(dd, (u8)nr);
		dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
		return -EINVAL;
	}

	return 0;
}

/**
 * enable_sdma_srcs - Helper to enable SDMA IRQ srcs
 * @dd: valid devdata structure
 * @i: index of SDMA engine
 */
static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
{
	set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
	set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
		      IS_SDMA_PROGRESS_START + i, true);
	set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
	set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
		      true);
}

/**
 * msix_request_irqs() - Allocate all MSIx IRQs
 * @dd: valid devdata structure
 *
 * Helper function to request the used MSIx IRQs.
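 * The general ("slow path") interrupt is requested first so that it
 * lands on MSIx vector 0, followed by one IRQ per SDMA engine and one
 * per kernel receive context.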
 *
 */
int msix_request_irqs(struct hfi1_devdata *dd)
{
	int i;
	int ret = msix_request_general_irq(dd);

	if (ret)
		return ret;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		ret = msix_request_sdma_irq(sde);
		if (ret)
			return ret;
		enable_sdma_srcs(sde->dd, i);
	}

	for (i = 0; i < dd->n_krcv_queues; i++) {
		struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);

		if (rcd)
			ret = msix_request_rcd_irq(rcd);
		hfi1_rcd_put(rcd);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * msix_free_irq() - Free the specified MSIx resources and IRQ
 * @dd: valid devdata
 * @msix_intr: MSIx vector to free.
 *
 */
void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
{
	struct hfi1_msix_entry *me;

	if (msix_intr >= dd->msix_info.max_requested)
		return;

	me = &dd->msix_info.msix_entries[msix_intr];

	if (!me->arg) /* => no irq, no affinity */
		return;

	hfi1_put_irq_affinity(dd, me);
	pci_free_irq(dd->pcidev, msix_intr, me->arg);

	me->arg = NULL;

	spin_lock(&dd->msix_info.msix_lock);
	__clear_bit(msix_intr, dd->msix_info.in_use_msix);
	spin_unlock(&dd->msix_info.msix_lock);
}

/**
 * msix_clean_up_interrupts - Free all MSIx IRQ resources
 * @dd: valid device data structure
 *
 * Free the MSIx and associated PCI resources, if they have been allocated.
 */
void msix_clean_up_interrupts(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_msix_entry *me = dd->msix_info.msix_entries;

	/* remove irqs - must happen before disabling/turning off */
	for (i = 0; i < dd->msix_info.max_requested; i++, me++)
		msix_free_irq(dd, i);

	/* clean structures */
	kfree(dd->msix_info.msix_entries);
	dd->msix_info.msix_entries = NULL;
	dd->msix_info.max_requested = 0;

	pci_free_irq_vectors(dd->pcidev);
}

/**
 * msix_netdev_synchronize_irq - netdev IRQ synchronize
 * @dd: valid devdata
 */
void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
{
	int i;
	int ctxt_count = hfi1_netdev_ctxt_count(dd);

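	/* Wait for any in-flight handler on each netdev context's IRQ */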
	for (i = 0; i < ctxt_count; i++) {
		struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
		struct hfi1_msix_entry *me;

		me = &dd->msix_info.msix_entries[rcd->msix_intr];

		synchronize_irq(me->irq);
	}
}