// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "lib/sf.h"
#include "lib/eq.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_MAX_MSIX_PER_SF 256
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
#define MLX5_IRQ_VEC_COMP_BASE 1

#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)

struct mlx5_irq {
        struct atomic_notifier_head nh;
        cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_FORMATTED_NAME];
        struct mlx5_irq_pool *pool;
        int refcount;
        struct msi_map map;
        u32 pool_index;
};

struct mlx5_irq_table {
        struct mlx5_irq_pool *pcif_pool;
        struct mlx5_irq_pool *sf_ctrl_pool;
        struct mlx5_irq_pool *sf_comp_pool;
};

static int mlx5_core_func_to_vport(const struct mlx5_core_dev *dev,
                                   int func,
                                   bool ec_vf_func)
{
        if (!ec_vf_func)
                return func;
        return mlx5_core_ec_vf_vport_base(dev) + func - 1;
}

/**
 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
 * to be assigned to each VF.
 * @dev: PF to work on
 * @num_vfs: Number of enabled VFs
 */
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
{
        int num_vf_msix, min_msix, max_msix;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        /* Limit maximum number of MSI-X vectors so the default configuration
         * has some available in the pool. This will allow the user to increase
         * the number of vectors in a VF without having to first size-down other
         * VFs.
         */
        return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
}
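
/* Worked example of the default sizing above, with illustrative capability
 * values that are not taken from any specific device:
 * num_total_dynamic_vf_msix = 4096, min_dynamic_vf_msix_table_size = 2,
 * max_dynamic_vf_msix_table_size = 64 and num_vfs = 32 give
 * min(4096 / 32, 64 / 2) = min(128, 32) = 32 vectors per VF. The max/2 cap
 * binds here, so plenty of the dynamic pool stays free and an individual VF
 * can later be raised toward 64 via mlx5_set_msix_vec_count() without first
 * shrinking other VFs.
 */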

/**
 * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
 * @dev: PF to work on
 * @function_id: Internal PCI VF function ID
 * @msix_vec_count: Number of MSI-X vectors to set
 */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
                            int msix_vec_count)
{
        int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        void *hca_cap = NULL, *query_cap = NULL, *cap;
        int num_vf_msix, min_msix, max_msix;
        bool ec_vf_function;
        int vport;
        int ret;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
                return -EOPNOTSUPP;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        if (msix_vec_count < min_msix)
                return -EINVAL;

        if (msix_vec_count > max_msix)
                return -EOVERFLOW;

        query_cap = kvzalloc(query_sz, GFP_KERNEL);
        hca_cap = kvzalloc(set_sz, GFP_KERNEL);
        if (!hca_cap || !query_cap) {
                ret = -ENOMEM;
                goto out;
        }

        ec_vf_function = mlx5_core_ec_sriov_enabled(dev);
        vport = mlx5_core_func_to_vport(dev, function_id, ec_vf_function);
        ret = mlx5_vport_get_other_func_general_cap(dev, vport, query_cap);
        if (ret)
                goto out;

        cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
        memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
               MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);

        MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
        MLX5_SET(set_hca_cap_in, hca_cap, ec_vf_function, ec_vf_function);
        MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);

        MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
        ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
        kvfree(hca_cap);
        kvfree(query_cap);
        return ret;
}
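
/* For context (an assumption about callers outside this file, not something
 * defined here): mlx5_set_msix_vec_count() is typically reached from the PCI
 * core's per-VF sriov_vf_msix_count sysfs attribute through the driver's
 * sriov_set_msix_vec_count() callback. A hypothetical admin flow, for
 * illustration only:
 *
 *   echo 32 > /sys/bus/pci/devices/<VF_BDF>/sriov_vf_msix_count
 *
 * The requested value must fall within [min_dynamic_vf_msix_table_size,
 * max_dynamic_vf_msix_table_size], as enforced above.
 */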

/* mlx5_system_free_irq - Free an IRQ
 * @irq: IRQ to free
 *
 * Free the IRQ and related resources, such as the rmap, from the system,
 * but do NOT free the mlx5_irq object or drop mlx5's reference to it.
 * This function is very important for the shutdown flow, where we need to
 * clean up system resources but keep mlx5 objects alive,
 * see mlx5_irq_table_free_irqs().
 */
static void mlx5_system_free_irq(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap *rmap;
#endif

        /* free_irq() requires that the affinity hint and rmap are cleared
         * before calling it. To satisfy this requirement, we call
         * irq_cpu_rmap_remove() to remove the notifier.
         */
        irq_update_affinity_hint(irq->map.virq, NULL);
#ifdef CONFIG_RFS_ACCEL
        rmap = mlx5_eq_table_get_rmap(pool->dev);
        if (rmap)
                irq_cpu_rmap_remove(rmap, irq->map.virq);
#endif

        free_irq(irq->map.virq, &irq->nh);
        if (irq->map.index && pci_msix_can_alloc_dyn(pool->dev->pdev))
                pci_msix_free_irq(pool->dev->pdev, irq->map);
}

static void irq_release(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;

        xa_erase(&pool->irqs, irq->pool_index);
        mlx5_system_free_irq(irq);
        free_cpumask_var(irq->mask);
        kfree(irq);
}

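/* Drop one reference on @irq and free it when the last reference goes away.
 * Returns 1 if this call released the IRQ, 0 otherwise.
 */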
int mlx5_irq_put(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;
        int ret = 0;

        mutex_lock(&pool->lock);
        irq->refcount--;
        if (!irq->refcount) {
                irq_release(irq);
                ret = 1;
        }
        mutex_unlock(&pool->lock);
        return ret;
}

int mlx5_irq_read_locked(struct mlx5_irq *irq)
{
        lockdep_assert_held(&irq->pool->lock);
        return irq->refcount;
}

int mlx5_irq_get_locked(struct mlx5_irq *irq)
{
        lockdep_assert_held(&irq->pool->lock);
        if (WARN_ON_ONCE(!irq->refcount))
                return 0;
        irq->refcount++;
        return 1;
}

static int irq_get(struct mlx5_irq *irq)
{
        int err;

        mutex_lock(&irq->pool->lock);
        err = mlx5_irq_get_locked(irq);
        mutex_unlock(&irq->pool->lock);
        return err;
}

static irqreturn_t irq_int_handler(int irq, void *nh)
{
        atomic_notifier_call_chain(nh, 0, NULL);
        return IRQ_HANDLED;
}

static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
        snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}

static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
        if (!pool->xa_num_irqs.max) {
                /* in case we only have a single irq for the device */
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
                return;
        }

        if (!vecidx) {
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
                return;
        }

        vecidx -= MLX5_IRQ_VEC_COMP_BASE;
        snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
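
/* For illustration: the base names produced above are combined with
 * MLX5_IRQ_NAME_FORMAT_STR and pci_name() in mlx5_irq_alloc() below, so
 * /proc/interrupts typically shows entries along the lines of
 * "mlx5_async0@pci:0000:08:00.0" and "mlx5_comp0@pci:0000:08:00.0"
 * (the exact format string and BDF are device/header dependent and are an
 * assumption here).
 */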

struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
                                struct irq_affinity_desc *af_desc,
                                struct cpu_rmap **rmap)
{
        struct mlx5_core_dev *dev = pool->dev;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_irq *irq;
        int err;

        irq = kzalloc(sizeof(*irq), GFP_KERNEL);
        if (!irq || !zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
                kfree(irq);
                return ERR_PTR(-ENOMEM);
        }

        if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
                /* The vector at index 0 is always statically allocated. If
                 * dynamic irq is not supported all vectors are statically
                 * allocated. In both cases just get the irq number and set
                 * the index.
                 */
                irq->map.virq = pci_irq_vector(dev->pdev, i);
                irq->map.index = i;
        } else {
                irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc);
                if (!irq->map.virq) {
                        err = irq->map.index;
                        goto err_alloc_irq;
                }
        }

        if (i && rmap && *rmap) {
#ifdef CONFIG_RFS_ACCEL
                err = irq_cpu_rmap_add(*rmap, irq->map.virq);
                if (err)
                        goto err_irq_rmap;
#endif
        }
        if (!mlx5_irq_pool_is_sf_pool(pool))
                irq_set_name(pool, name, i);
        else
                irq_sf_set_name(pool, name, i);
        ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
        snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
                 MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
        err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
                          &irq->nh);
        if (err) {
                mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
                goto err_req_irq;
        }

        if (af_desc) {
                cpumask_copy(irq->mask, &af_desc->mask);
                irq_set_affinity_and_hint(irq->map.virq, irq->mask);
        }
        irq->pool = pool;
        irq->refcount = 1;
        irq->pool_index = i;
        err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
        if (err) {
                mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
                              irq->pool_index, err);
                goto err_xa;
        }
        return irq;
err_xa:
        if (af_desc)
                irq_update_affinity_hint(irq->map.virq, NULL);
        free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
        if (i && rmap && *rmap) {
                free_irq_cpu_rmap(*rmap);
                *rmap = NULL;
        }
err_irq_rmap:
#endif
        if (i && pci_msix_can_alloc_dyn(dev->pdev))
                pci_msix_free_irq(dev->pdev, irq->map);
err_alloc_irq:
        free_cpumask_var(irq->mask);
        kfree(irq);
        return ERR_PTR(err);
}

int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int ret;

        ret = irq_get(irq);
        if (!ret)
                /* Something went very wrong here: we are enabling an EQ
                 * on a non-existent IRQ.
                 */
                return -ENOENT;
        ret = atomic_notifier_chain_register(&irq->nh, nb);
        if (ret)
                mlx5_irq_put(irq);
        return ret;
}

int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int err = 0;

        err = atomic_notifier_chain_unregister(&irq->nh, nb);
        mlx5_irq_put(irq);
        return err;
}

struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
{
        return irq->mask;
}

int mlx5_irq_get_irq(const struct mlx5_irq *irq)
{
        return irq->map.virq;
}

int mlx5_irq_get_index(struct mlx5_irq *irq)
{
        return irq->map.index;
}

struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
{
        return irq->pool;
}

/* irq_pool API */

/* Request an IRQ from a given pool according to the given index */
static struct mlx5_irq *
irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
                        struct irq_affinity_desc *af_desc,
                        struct cpu_rmap **rmap)
{
        struct mlx5_irq *irq;

        mutex_lock(&pool->lock);
        irq = xa_load(&pool->irqs, vecidx);
        if (irq) {
                mlx5_irq_get_locked(irq);
                goto unlock;
        }
        irq = mlx5_irq_alloc(pool, vecidx, af_desc, rmap);
unlock:
        mutex_unlock(&pool->lock);
        return irq;
}

static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
{
        return irq_table->sf_ctrl_pool;
}

static struct mlx5_irq_pool *
sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
        return irq_table->sf_comp_pool;
}

struct mlx5_irq_pool *
mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = NULL;

        if (mlx5_core_is_sf(dev))
                pool = sf_comp_irq_pool_get(irq_table);

        /* In some configurations there is no SF IRQ pool, so return the
         * PF IRQ pool when the SF pool doesn't exist.
         */
        return pool ? pool : irq_table->pcif_pool;
}

static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = NULL;

        if (mlx5_core_is_sf(dev))
                pool = sf_ctrl_irq_pool_get(irq_table);

        /* In some configurations there is no SF IRQ pool, so return the
         * PF IRQ pool when the SF pool doesn't exist.
         */
        return pool ? pool : irq_table->pcif_pool;
}

static void _mlx5_irq_release(struct mlx5_irq *irq)
{
        synchronize_irq(irq->map.virq);
        mlx5_irq_put(irq);
}

/**
 * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
 * @dev: mlx5 device that is releasing the IRQ.
 * @ctrl_irq: ctrl IRQ to be released.
 */
void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
{
        mlx5_irq_affinity_irq_release(dev, ctrl_irq);
}

/**
 * mlx5_ctrl_irq_request - request a ctrl IRQ for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQ.
 *
 * This function returns a pointer to the IRQ, or ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
        struct irq_affinity_desc *af_desc;
        struct mlx5_irq *irq;

        af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
        if (!af_desc)
                return ERR_PTR(-ENOMEM);

        cpumask_copy(&af_desc->mask, cpu_online_mask);
        af_desc->is_managed = false;
        if (!mlx5_irq_pool_is_sf_pool(pool)) {
                /* We are allocating a control IRQ from a PCI device's pool.
                 * This can also happen for an SF if the SF pool is empty.
                 */
                if (!pool->xa_num_irqs.max) {
                        cpumask_clear(&af_desc->mask);
                        /* In case we only have a single IRQ for PF/VF */
                        cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc->mask);
                }
                /* Allocate the IRQ at index 0. The vector was already allocated */
                irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
        } else {
                irq = mlx5_irq_affinity_request(dev, pool, af_desc);
        }

        kvfree(af_desc);

        return irq;
}
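
/* A minimal usage sketch for the ctrl IRQ API, with a hypothetical caller
 * (e.g. async EQ setup; the surrounding code is illustrative only):
 *
 *   struct mlx5_irq *irq = mlx5_ctrl_irq_request(dev);
 *
 *   if (IS_ERR(irq))
 *           return PTR_ERR(irq);
 *   ...attach a notifier with mlx5_irq_attach_nb() and arm the EQ...
 *   mlx5_ctrl_irq_release(dev, irq);
 *
 * Requesting may reuse an already-allocated vector (the IRQ is reference
 * counted), and releasing only frees it once the last user is gone.
 */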

/**
 * mlx5_irq_request - request an IRQ for mlx5 PF/VF device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
 * provided.
 * @af_desc: affinity descriptor for this IRQ.
 * @rmap: pointer to reverse map pointer for completion interrupts
 *
 * This function returns a pointer to the IRQ, or ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
                                  struct irq_affinity_desc *af_desc,
                                  struct cpu_rmap **rmap)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool;
        struct mlx5_irq *irq;

        pool = irq_table->pcif_pool;
        irq = irq_pool_request_vector(pool, vecidx, af_desc, rmap);
        if (IS_ERR(irq))
                return irq;
        mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
                      irq->map.virq, cpumask_pr_args(&af_desc->mask),
                      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
        return irq;
}

/**
 * mlx5_irq_release_vector - release one IRQ back to the system.
 * @irq: the irq to release.
 */
void mlx5_irq_release_vector(struct mlx5_irq *irq)
{
        _mlx5_irq_release(irq);
}

/**
 * mlx5_irq_request_vector - request one IRQ for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @cpu: CPU to bind the IRQ to.
 * @vecidx: vector index to request an IRQ for.
 * @rmap: pointer to reverse map pointer for completion interrupts
 *
 * Each IRQ is bound to at most 1 CPU.
 * This function requests one IRQ for the given @vecidx.
 *
 * This function returns a pointer to the irq on success, or an error pointer
 * in case of an error.
 */
struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
                                         u16 vecidx, struct cpu_rmap **rmap)
{
        struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = table->pcif_pool;
        int offset = MLX5_IRQ_VEC_COMP_BASE;
        struct irq_affinity_desc *af_desc;
        struct mlx5_irq *irq;

        af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
        if (!af_desc)
                return ERR_PTR(-ENOMEM);

        if (!pool->xa_num_irqs.max)
                offset = 0;

        af_desc->is_managed = false;
        cpumask_clear(&af_desc->mask);
        cpumask_set_cpu(cpu, &af_desc->mask);

        irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);

        kvfree(af_desc);

        return irq;
}
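
/* A minimal usage sketch for a completion vector, with a hypothetical caller
 * (e.g. a completion EQ being created for a specific core; illustrative only):
 *
 *   struct cpu_rmap *rmap = mlx5_eq_table_get_rmap(dev);
 *   struct mlx5_irq *irq;
 *
 *   irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
 *   if (IS_ERR(irq))
 *           return PTR_ERR(irq);
 *   ...
 *   mlx5_irq_release_vector(irq);
 *
 * The IRQ ends up pinned to @cpu through the affinity descriptor built above.
 */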

static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
               u32 min_threshold, u32 max_threshold)
{
        struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return ERR_PTR(-ENOMEM);
        pool->dev = dev;
        mutex_init(&pool->lock);
        xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
        pool->xa_num_irqs.min = start;
        pool->xa_num_irqs.max = start + size - 1;
        if (name)
                snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
                         "%s", name);
        pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
        pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
        mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
                      name ? name : "mlx5_pcif_pool", size, start);
        return pool;
}

static void irq_pool_free(struct mlx5_irq_pool *pool)
{
        struct mlx5_irq *irq;
        unsigned long index;

        /* There are cases in which we are destroying the irq_table before
         * freeing all the IRQs, fast teardown for example. Hence, free the
         * IRQs which might not have been freed yet.
         */
        xa_for_each(&pool->irqs, index, irq)
                irq_release(irq);
        xa_destroy(&pool->irqs);
        mutex_destroy(&pool->lock);
        kfree(pool->irqs_per_cpu);
        kvfree(pool);
}

static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec,
                          bool dynamic_vec)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;
        int sf_vec_available = sf_vec;
        int num_sf_ctrl;
        int err;

        /* init pcif_pool */
        table->pcif_pool = irq_pool_alloc(dev, 0, pcif_vec, NULL,
                                          MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                          MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->pcif_pool))
                return PTR_ERR(table->pcif_pool);
        if (!mlx5_sf_max_functions(dev))
                return 0;
        if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
                mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
                return 0;
        }

        /* init sf_ctrl_pool */
        num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
                                   MLX5_SFS_PER_CTRL_IRQ);
        num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
        if (!dynamic_vec && (num_sf_ctrl + 1) > sf_vec_available) {
                mlx5_core_dbg(dev,
                              "Not enough IRQs for SFs control and completion pool, required=%d avail=%d\n",
                              num_sf_ctrl + 1, sf_vec_available);
                return 0;
        }

        table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
                                             "mlx5_sf_ctrl",
                                             MLX5_EQ_SHARE_IRQ_MIN_CTRL,
                                             MLX5_EQ_SHARE_IRQ_MAX_CTRL);
        if (IS_ERR(table->sf_ctrl_pool)) {
                err = PTR_ERR(table->sf_ctrl_pool);
                goto err_pf;
        }
        sf_vec_available -= num_sf_ctrl;

        /* init sf_comp_pool, remaining vectors are for the SF completions */
        table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl,
                                             sf_vec_available, "mlx5_sf_comp",
                                             MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                             MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->sf_comp_pool)) {
                err = PTR_ERR(table->sf_comp_pool);
                goto err_sf_ctrl;
        }

        table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL);
        if (!table->sf_comp_pool->irqs_per_cpu) {
                err = -ENOMEM;
                goto err_irqs_per_cpu;
        }

        return 0;

err_irqs_per_cpu:
        irq_pool_free(table->sf_comp_pool);
err_sf_ctrl:
        irq_pool_free(table->sf_ctrl_pool);
err_pf:
        irq_pool_free(table->pcif_pool);
        return err;
}
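
/* Resulting vector-index layout when all three pools are created, shown for
 * illustration (indices refer to positions in the device's MSI-X table):
 *
 *   [0 .. pcif_vec - 1]                        pcif_pool (PF/VF EQs)
 *   [pcif_vec .. pcif_vec + num_sf_ctrl - 1]   sf_ctrl_pool
 *   [pcif_vec + num_sf_ctrl .. end]            sf_comp_pool
 *
 * With dynamic MSI-X allocation these ranges are only bounds; the actual
 * vectors are allocated lazily in mlx5_irq_alloc().
 */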

static void irq_pools_destroy(struct mlx5_irq_table *table)
{
        if (table->sf_ctrl_pool) {
                irq_pool_free(table->sf_comp_pool);
                irq_pool_free(table->sf_ctrl_pool);
        }
        irq_pool_free(table->pcif_pool);
}

static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
{
        struct mlx5_irq *irq;
        unsigned long index;

        xa_for_each(&pool->irqs, index, irq)
                mlx5_system_free_irq(irq);
}

static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
{
        if (table->sf_ctrl_pool) {
                mlx5_irq_pool_free_irqs(table->sf_comp_pool);
                mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
        }
        mlx5_irq_pool_free_irqs(table->pcif_pool);
}

/* irq_table API */

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table;

        if (mlx5_core_is_sf(dev))
                return 0;

        irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
                                  dev->priv.numa_node);
        if (!irq_table)
                return -ENOMEM;

        dev->priv.irq_table = irq_table;
        return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
        if (mlx5_core_is_sf(dev))
                return;

        kvfree(dev->priv.irq_table);
}

int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
        if (!table->pcif_pool->xa_num_irqs.max)
                return 1;
        return table->pcif_pool->xa_num_irqs.max - table->pcif_pool->xa_num_irqs.min;
}

int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
        int num_eqs = mlx5_max_eq_cap_get(dev);
        bool dynamic_vec;
        int total_vec;
        int pcif_vec;
        int req_vec;
        int err;
        int n;

        if (mlx5_core_is_sf(dev))
                return 0;

        /* PCI PF vectors usage is limited by online cpus, device EQs and
         * PCI MSI-X capability.
         */
        pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
        pcif_vec = min_t(int, pcif_vec, num_eqs);
        pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));

        total_vec = pcif_vec;
        if (mlx5_sf_max_functions(dev))
                total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev);
        total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));

        req_vec = pci_msix_can_alloc_dyn(dev->pdev) ? 1 : total_vec;
        n = pci_alloc_irq_vectors(dev->pdev, 1, req_vec, PCI_IRQ_MSIX);
        if (n < 0)
                return n;

        /* Further limit the pools' vectors based on the platform for the
         * non-dynamic case.
         */
        dynamic_vec = pci_msix_can_alloc_dyn(dev->pdev);
        if (!dynamic_vec) {
                pcif_vec = min_t(int, n, pcif_vec);
                total_vec = min_t(int, n, total_vec);
        }

        err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec, dynamic_vec);
        if (err)
                pci_free_irq_vectors(dev->pdev);

        return err;
}
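
/* Worked example of the budgeting above, with illustrative numbers: a 2-port
 * device on a host with 16 online CPUs, a 256-entry MSI-X table and
 * num_eqs = 1024 gives pcif_vec = 2 * 16 + 1 = 33, which survives both caps,
 * so total_vec = 33 when no SFs are supported. If mlx5_sf_max_functions()
 * returned 64, total_vec would grow by 64 * MLX5_MAX_MSIX_PER_SF but be
 * clamped back to the 256 MSI-X entries; with dynamic allocation only one
 * vector is requested up front and the rest are allocated on demand.
 */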

void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;

        if (mlx5_core_is_sf(dev))
                return;

        /* There are cases where IRQs are still in use when we reach this
         * point. Hence, make sure all the IRQs are released.
         */
        irq_pools_destroy(table);
        pci_free_irq_vectors(dev->pdev);
}

void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;

        if (mlx5_core_is_sf(dev))
                return;

        mlx5_irq_pools_free_irqs(table);
        pci_free_irq_vectors(dev->pdev);
}

int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
        if (table->sf_comp_pool)
                return min_t(int, num_online_cpus(),
                             table->sf_comp_pool->xa_num_irqs.max -
                             table->sf_comp_pool->xa_num_irqs.min + 1);
        else
                return mlx5_irq_table_get_num_comp(table);
}

struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_MLX5_SF
        if (mlx5_core_is_sf(dev))
                return dev->priv.parent_mdev->priv.irq_table;
#endif
        return dev->priv.irq_table;
}