// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 - 2020 Intel Corporation.
 */

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "netdev.h"

11 /**
12 * msix_initialize() - Calculate, request and configure MSIx IRQs
13 * @dd: valid hfi1 devdata
14 *
15 */
msix_initialize(struct hfi1_devdata * dd)16 int msix_initialize(struct hfi1_devdata *dd)
17 {
18 u32 total;
19 int ret;
20 struct hfi1_msix_entry *entries;
21
22 /*
23 * MSIx interrupt count:
24 * one for the general, "slow path" interrupt
25 * one per used SDMA engine
26 * one per kernel receive context
27 * one for each VNIC context
28 * ...any new IRQs should be added here.
29 */
30 total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;
31
32 if (total >= CCE_NUM_MSIX_VECTORS)
33 return -EINVAL;
34
35 ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
36 if (ret < 0) {
37 dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
38 return ret;
39 }
40
41 entries = kzalloc_objs(*dd->msix_info.msix_entries, total);
42 if (!entries) {
43 pci_free_irq_vectors(dd->pcidev);
44 return -ENOMEM;
45 }
46
47 dd->msix_info.msix_entries = entries;
48 spin_lock_init(&dd->msix_info.msix_lock);
49 bitmap_zero(dd->msix_info.in_use_msix, total);
50 dd->msix_info.max_requested = total;
51 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
52
53 return 0;
54 }
55
56 /**
57 * msix_request_irq() - Allocate a free MSIx IRQ
58 * @dd: valid devdata
59 * @arg: context information for the IRQ
60 * @handler: IRQ handler
61 * @thread: IRQ thread handler (could be NULL)
62 * @type: affinty IRQ type
63 * @name: IRQ name
64 *
65 * Allocated an MSIx vector if available, and then create the appropriate
66 * meta data needed to keep track of the pci IRQ request.
67 *
68 * Return:
69 * < 0 Error
70 * >= 0 MSIx vector
71 *
72 */
msix_request_irq(struct hfi1_devdata * dd,void * arg,irq_handler_t handler,irq_handler_t thread,enum irq_type type,const char * name)73 static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
74 irq_handler_t handler, irq_handler_t thread,
75 enum irq_type type, const char *name)
76 {
77 unsigned long nr;
78 int irq;
79 int ret;
80 struct hfi1_msix_entry *me;
81
82 /* Allocate an MSIx vector */
83 spin_lock(&dd->msix_info.msix_lock);
84 nr = find_first_zero_bit(dd->msix_info.in_use_msix,
85 dd->msix_info.max_requested);
86 if (nr < dd->msix_info.max_requested)
87 __set_bit(nr, dd->msix_info.in_use_msix);
88 spin_unlock(&dd->msix_info.msix_lock);
89
90 if (nr == dd->msix_info.max_requested)
91 return -ENOSPC;
92
93 if (type < IRQ_SDMA || type >= IRQ_OTHER)
94 return -EINVAL;
95
96 irq = pci_irq_vector(dd->pcidev, nr);
97 ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
98 if (ret) {
99 dd_dev_err(dd,
100 "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
101 name, irq, nr, ret);
102 spin_lock(&dd->msix_info.msix_lock);
103 __clear_bit(nr, dd->msix_info.in_use_msix);
104 spin_unlock(&dd->msix_info.msix_lock);
105 return ret;
106 }
107
108 /*
109 * assign arg after pci_request_irq call, so it will be
110 * cleaned up
111 */
112 me = &dd->msix_info.msix_entries[nr];
113 me->irq = irq;
114 me->arg = arg;
115 me->type = type;
116
117 /* This is a request, so a failure is not fatal */
118 ret = hfi1_get_irq_affinity(dd, me);
119 if (ret)
120 dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);
121
122 return nr;
123 }
124
/*
 * Common RCVAVAIL IRQ request path for kernel and netdev receive
 * contexts: reserve a vector, then program the context's interrupt
 * register/mask and remap the hardware source onto that vector.
 */
static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
				       irq_handler_t handler,
				       irq_handler_t thread,
				       const char *name)
{
	enum irq_type type = rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT;
	u32 src = IS_RCVAVAIL_START + rcd->ctxt;
	int nr;

	nr = msix_request_irq(rcd->dd, rcd, handler, thread, type, name);
	if (nr < 0)
		return nr;

	/*
	 * Set the interrupt register and mask for this
	 * context's interrupt.
	 */
	rcd->ireg = src / 64;
	rcd->imask = ((u64)1) << (src % 64);
	rcd->msix_intr = nr;
	remap_intr(rcd->dd, src, nr);

	return 0;
}
147
148 /**
149 * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
150 * @rcd: valid rcd context
151 *
152 */
msix_request_rcd_irq(struct hfi1_ctxtdata * rcd)153 int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
154 {
155 char name[MAX_NAME_SIZE];
156
157 snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
158 rcd->dd->unit, rcd->ctxt);
159
160 return msix_request_rcd_irq_common(rcd, receive_context_interrupt,
161 receive_context_thread, name);
162 }
163
164 /**
165 * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs
166 * for netdev context
167 * @rcd: valid netdev contexti
168 */
msix_netdev_request_rcd_irq(struct hfi1_ctxtdata * rcd)169 int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
170 {
171 char name[MAX_NAME_SIZE];
172
173 snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
174 rcd->dd->unit, rcd->ctxt);
175 return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
176 NULL, name);
177 }
178
179 /**
180 * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
181 * @sde: valid sdma engine
182 *
183 */
msix_request_sdma_irq(struct sdma_engine * sde)184 int msix_request_sdma_irq(struct sdma_engine *sde)
185 {
186 int nr;
187 char name[MAX_NAME_SIZE];
188
189 snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
190 sde->dd->unit, sde->this_idx);
191 nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
192 IRQ_SDMA, name);
193 if (nr < 0)
194 return nr;
195 sde->msix_intr = nr;
196 remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
197
198 return 0;
199 }
200
201 /**
202 * msix_request_general_irq - Helper for getting general IRQ
203 * resources
204 * @dd: valid device data
205 */
msix_request_general_irq(struct hfi1_devdata * dd)206 int msix_request_general_irq(struct hfi1_devdata *dd)
207 {
208 int nr;
209 char name[MAX_NAME_SIZE];
210
211 snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
212 nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
213 name);
214 if (nr < 0)
215 return nr;
216
217 /* general interrupt must be MSIx vector 0 */
218 if (nr) {
219 msix_free_irq(dd, (u8)nr);
220 dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
221 return -EINVAL;
222 }
223
224 return 0;
225 }
226
227 /**
228 * enable_sdma_srcs - Helper to enable SDMA IRQ srcs
229 * @dd: valid devdata structure
230 * @i: index of SDMA engine
231 */
enable_sdma_srcs(struct hfi1_devdata * dd,int i)232 static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
233 {
234 set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
235 set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
236 IS_SDMA_PROGRESS_START + i, true);
237 set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
238 set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
239 true);
240 }
241
242 /**
243 * msix_request_irqs() - Allocate all MSIx IRQs
244 * @dd: valid devdata structure
245 *
246 * Helper function to request the used MSIx IRQs.
247 *
248 */
msix_request_irqs(struct hfi1_devdata * dd)249 int msix_request_irqs(struct hfi1_devdata *dd)
250 {
251 int i;
252 int ret = msix_request_general_irq(dd);
253
254 if (ret)
255 return ret;
256
257 for (i = 0; i < dd->num_sdma; i++) {
258 struct sdma_engine *sde = &dd->per_sdma[i];
259
260 ret = msix_request_sdma_irq(sde);
261 if (ret)
262 return ret;
263 enable_sdma_srcs(sde->dd, i);
264 }
265
266 for (i = 0; i < dd->n_krcv_queues; i++) {
267 struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
268
269 if (rcd)
270 ret = msix_request_rcd_irq(rcd);
271 hfi1_rcd_put(rcd);
272 if (ret)
273 return ret;
274 }
275
276 return 0;
277 }
278
279 /**
280 * msix_free_irq() - Free the specified MSIx resources and IRQ
281 * @dd: valid devdata
282 * @msix_intr: MSIx vector to free.
283 *
284 */
msix_free_irq(struct hfi1_devdata * dd,u8 msix_intr)285 void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
286 {
287 struct hfi1_msix_entry *me;
288
289 if (msix_intr >= dd->msix_info.max_requested)
290 return;
291
292 me = &dd->msix_info.msix_entries[msix_intr];
293
294 if (!me->arg) /* => no irq, no affinity */
295 return;
296
297 hfi1_put_irq_affinity(dd, me);
298 pci_free_irq(dd->pcidev, msix_intr, me->arg);
299
300 me->arg = NULL;
301
302 spin_lock(&dd->msix_info.msix_lock);
303 __clear_bit(msix_intr, dd->msix_info.in_use_msix);
304 spin_unlock(&dd->msix_info.msix_lock);
305 }
306
307 /**
308 * msix_clean_up_interrupts - Free all MSIx IRQ resources
309 * @dd: valid device data data structure
310 *
311 * Free the MSIx and associated PCI resources, if they have been allocated.
312 */
msix_clean_up_interrupts(struct hfi1_devdata * dd)313 void msix_clean_up_interrupts(struct hfi1_devdata *dd)
314 {
315 int i;
316 struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
317
318 /* remove irqs - must happen before disabling/turning off */
319 for (i = 0; i < dd->msix_info.max_requested; i++, me++)
320 msix_free_irq(dd, i);
321
322 /* clean structures */
323 kfree(dd->msix_info.msix_entries);
324 dd->msix_info.msix_entries = NULL;
325 dd->msix_info.max_requested = 0;
326
327 pci_free_irq_vectors(dd->pcidev);
328 }
329
330 /**
331 * msix_netdev_synchronize_irq - netdev IRQ synchronize
332 * @dd: valid devdata
333 */
msix_netdev_synchronize_irq(struct hfi1_devdata * dd)334 void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
335 {
336 int i;
337 int ctxt_count = hfi1_netdev_ctxt_count(dd);
338
339 for (i = 0; i < ctxt_count; i++) {
340 struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
341 struct hfi1_msix_entry *me;
342
343 me = &dd->msix_info.msix_entries[rcd->msix_intr];
344
345 synchronize_irq(me->irq);
346 }
347 }
348