// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include "bnge.h"
#include "bnge_hwrm.h"
#include "bnge_hwrm_lib.h"
#include "bnge_resc.h"

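/* Convert a total TX ring count to per-TC "chunks": TX rings are
 * distributed evenly across the configured traffic classes, so e.g.
 * tx = 8 with num_tc = 2 yields 4. bnge_num_cp_to_tx() below is the
 * inverse conversion.
 */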
static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx)
{
	u16 tcs = bd->num_tc;

	if (!tcs)
		tcs = 1;

	return tx / tcs;
}

static u16 bnge_get_max_func_irqs(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;

	return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs);
}

static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd)
{
	return bd->hw_resc.max_stat_ctxs;
}

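/* Report whether enough stat contexts remain for the RoCE auxiliary
 * device: the pool must exceed BNGE_MIN_ROCE_STAT_CTXS and must not
 * already be fully consumed by the NQ rings.
 */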
bool bnge_aux_has_enough_resources(struct bnge_dev *bd)
{
	unsigned int max_stat_ctxs;

	max_stat_ctxs = bnge_get_max_func_stat_ctxs(bd);
	if (max_stat_ctxs <= BNGE_MIN_ROCE_STAT_CTXS ||
	    bd->nq_nr_rings == max_stat_ctxs)
		return false;

	return true;
}

static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd)
{
	return bd->hw_resc.max_cp_rings;
}

static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
{
	int roce_msix = BNGE_MAX_ROCE_MSIX;

	return min_t(int, roce_msix, num_online_cpus() + 1);
}

u16 bnge_aux_get_msix(struct bnge_dev *bd)
{
	if (bnge_is_roce_en(bd))
		return bd->aux_num_msix;

	return 0;
}

static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num)
{
	if (bnge_is_roce_en(bd))
		bd->aux_num_msix = num;
}

static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd)
{
	if (bnge_is_roce_en(bd))
		return bd->aux_num_stat_ctxs;

	return 0;
}

static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx)
{
	if (bnge_is_roce_en(bd))
		bd->aux_num_stat_ctxs = num_aux_ctx;
}

static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd)
{
	return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd);
}

static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd)
{
	int stat_ctx = 0;

	if (bnge_is_roce_en(bd)) {
		stat_ctx = BNGE_MIN_ROCE_STAT_CTXS;

		if (!bd->pf.port_id && bd->port_count > 1)
			stat_ctx++;
	}

	return stat_ctx;
}

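/* Aggregate demand helpers: NQ demand is the NQ ring count plus any
 * MSI-X granted to the RoCE aux device; completion-ring demand is
 * one ring per TX plus one per RX ring.
 */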
static u16 bnge_nqs_demand(struct bnge_dev *bd)
{
	return bd->nq_nr_rings + bnge_aux_get_msix(bd);
}

static u16 bnge_cprs_demand(struct bnge_dev *bd)
{
	return bd->tx_nr_rings + bd->rx_nr_rings;
}

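/* Clamp a requested number of extra MSI-X vectors to what remains
 * beyond the NQ rings already in use; returns 0 if nothing is left.
 */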
static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num)
{
	u16 max_irq = bnge_get_max_func_irqs(bd);
	u16 total_demand = bd->nq_nr_rings + num;

	if (max_irq < total_demand) {
		num = max_irq - bd->nq_nr_rings;
		if (num <= 0)
			return 0;
	}

	return num;
}

static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 tx_chunks)
{
	return tx_chunks * bd->num_tc;
}

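/* Fit the requested RX/TX counts under @max. In shared mode each
 * channel carries an RX/TX pair, so both counts are simply capped at
 * @max. In non-shared mode RX and TX consume separate channels, so
 * the larger side is decremented first until rx + tx <= max; e.g. a
 * request of rx = 6, tx = 6 with max = 8 settles at rx = 4, tx = 4.
 */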
int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared)
{
	u16 _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(u16, _rx, max);
		*tx = min_t(u16, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;
		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}

	return 0;
}

static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
			     u16 *tx, u16 max_nq, bool sh)
{
	u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx);

	if (tx_chunks != *tx) {
		u16 tx_saved = tx_chunks;
		int rc;

		rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh);
		if (rc)
			return rc;
		if (tx_chunks != tx_saved)
			*tx = bnge_num_cp_to_tx(bd, tx_chunks);
		return 0;
	}

	return bnge_fix_rings_count(rx, tx, max_nq, sh);
}

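/* Number of RSS contexts required to back @rx_rings RX rings;
 * bnge_adjust_pow_two() scales (rx_rings - 1) against the
 * BNGE_RSS_TABLE_ENTRIES table size. Zero rings need zero contexts.
 */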
int bnge_cal_nr_rss_ctxs(u16 rx_rings)
{
	if (!rx_rings)
		return 0;

	return bnge_adjust_pow_two(rx_rings - 1,
				   BNGE_RSS_TABLE_ENTRIES);
}

static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd,
				struct bnge_hw_rings *hwr)
{
	return bnge_cal_nr_rss_ctxs(hwr->grp);
}

static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
{
	return 1;
}

u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
{
	return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
	       BNGE_RSS_TABLE_ENTRIES;
}

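/* Populate the RSS indirection table with the ethtool default spread
 * over the current RX rings and zero out any unused tail entries.
 */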
static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd)
{
	u16 max_entries, pad;
	u32 *rss_indir_tbl;
	int i;

	max_entries = bnge_get_rxfh_indir_size(bd);
	rss_indir_tbl = &bd->rss_indir_tbl[0];

	for (i = 0; i < max_entries; i++)
		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i,
							      bd->rx_nr_rings);

	pad = bd->rss_indir_tbl_entries - max_entries;
	if (pad)
		memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}

static void bnge_copy_reserved_rings(struct bnge_dev *bd,
				     struct bnge_hw_rings *hwr)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;

	hwr->tx = hw_resc->resv_tx_rings;
	hwr->rx = hw_resc->resv_rx_rings;
	hwr->nq = hw_resc->resv_irqs;
	hwr->cmpl = hw_resc->resv_cp_rings;
	hwr->grp = hw_resc->resv_hw_ring_grps;
	hwr->vnic = hw_resc->resv_vnics;
	hwr->stat = hw_resc->resv_stat_ctxs;
	hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
}

static bool bnge_rings_ok(struct bnge_hw_rings *hwr)
{
	return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&
	       hwr->stat && hwr->cmpl;
}

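/* A fresh reservation is needed whenever any reserved count no
 * longer matches demand: TX/RX rings, completion rings, VNICs, stat
 * contexts or IRQs.
 */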
static bool bnge_need_reserve_rings(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	u16 cprs = bnge_cprs_demand(bd);
	u16 rx = bd->rx_nr_rings, stat;
	u16 nqs = bnge_nqs_demand(bd);
	u16 vnic;

	if (hw_resc->resv_tx_rings != bd->tx_nr_rings)
		return true;

	vnic = bnge_get_total_vnics(bd, rx);

	if (bnge_is_agg_reqd(bd))
		rx <<= 1;
	stat = bnge_func_stat_ctxs_demand(bd);
	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs ||
	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat)
		return true;
	if (hw_resc->resv_irqs != nqs)
		return true;

	return false;
}

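/* Compute the full resource demand (NQs, TX/RX/completion rings,
 * ring groups, VNICs, RSS and stat contexts), reserve it through
 * HWRM, then trim the driver's ring counts down to what firmware
 * actually granted. RX demand is doubled when aggregation rings are
 * required and halved back afterwards. While the RoCE aux device is
 * not yet registered, MSI-X and stat contexts left over beyond the
 * NQ rings are set aside for it.
 */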
int bnge_reserve_rings(struct bnge_dev *bd)
{
	u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd);
	struct bnge_hw_rings hwr = {0};
	u16 rx_rings, old_rx_rings;
	u16 nq = bd->nq_nr_rings;
	u16 aux_msix = 0;
	bool sh = false;
	u16 tx_cp;
	int rc;

	if (!bnge_need_reserve_rings(bd))
		return 0;

	if (!bnge_aux_registered(bd)) {
		aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix);
		if (!aux_msix)
			bnge_aux_set_stat_ctxs(bd, 0);

		if (aux_msix > aux_dflt_msix)
			aux_msix = aux_dflt_msix;
		hwr.nq = nq + aux_msix;
	} else {
		hwr.nq = bnge_nqs_demand(bd);
	}

	hwr.tx = bd->tx_nr_rings;
	hwr.rx = bd->rx_nr_rings;
	if (bd->flags & BNGE_EN_SHARED_CHNL)
		sh = true;
	hwr.cmpl = hwr.rx + hwr.tx;

	hwr.vnic = bnge_get_total_vnics(bd, hwr.rx);

	if (bnge_is_agg_reqd(bd))
		hwr.rx <<= 1;
	hwr.grp = bd->rx_nr_rings;
	hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr);
	hwr.stat = bnge_func_stat_ctxs_demand(bd);
	old_rx_rings = bd->hw_resc.resv_rx_rings;

	rc = bnge_hwrm_reserve_rings(bd, &hwr);
	if (rc)
		return rc;

	bnge_copy_reserved_rings(bd, &hwr);

	rx_rings = hwr.rx;
	if (bnge_is_agg_reqd(bd)) {
		if (hwr.rx >= 2)
			rx_rings = hwr.rx >> 1;
		else
			return -ENOMEM;
	}

	rx_rings = min_t(u16, rx_rings, hwr.grp);
	hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);
	if (hwr.stat > bnge_aux_get_stat_ctxs(bd))
		hwr.stat -= bnge_aux_get_stat_ctxs(bd);
	hwr.nq = min_t(u16, hwr.nq, hwr.stat);

	/* Adjust the rings */
	rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);
	if (bnge_is_agg_reqd(bd))
		hwr.rx = rx_rings << 1;
	tx_cp = hwr.tx;
	hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;
	bd->tx_nr_rings = hwr.tx;

	if (rx_rings != bd->rx_nr_rings)
		dev_warn(bd->dev, "RX rings reservation reduced to %d from %d requested\n",
			 rx_rings, bd->rx_nr_rings);

	bd->rx_nr_rings = rx_rings;
	bd->nq_nr_rings = hwr.nq;

	if (!bnge_rings_ok(&hwr))
		return -ENOMEM;

	if (old_rx_rings != bd->hw_resc.resv_rx_rings)
		bnge_set_dflt_rss_indir_tbl(bd);

	if (!bnge_aux_registered(bd)) {
		u16 resv_msix, resv_ctx, aux_ctxs;
		struct bnge_hw_resc *hw_resc;

		hw_resc = &bd->hw_resc;
		resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings;
		aux_msix = min_t(u16, resv_msix, aux_msix);
		bnge_aux_set_msix_num(bd, aux_msix);
		resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings;
		aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd));
		bnge_aux_set_stat_ctxs(bd, aux_ctxs);
	}

	return rc;
}

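/* Allocate MSI-X vectors for the NQ demand (including the RoCE
 * share), build the IRQ table, and shrink the RX/TX ring counts to
 * match the vectors the PCI core actually granted.
 */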
int bnge_alloc_irqs(struct bnge_dev *bd)
{
	u16 aux_msix, tx_cp, num_entries;
	int i, irqs_demand, rc;
	u16 max, min = 1;

	irqs_demand = bnge_nqs_demand(bd);
	max = bnge_get_max_func_irqs(bd);
	if (irqs_demand > max)
		irqs_demand = max;

	if (!(bd->flags & BNGE_EN_SHARED_CHNL))
		min = 2;

	irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand,
					    PCI_IRQ_MSIX);
	aux_msix = bnge_aux_get_msix(bd);
	if (irqs_demand < 0 || irqs_demand < aux_msix) {
		rc = -ENODEV;
		goto err_free_irqs;
	}

	num_entries = irqs_demand;
	if (pci_msix_can_alloc_dyn(bd->pdev))
		num_entries = max;
	bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL);
	if (!bd->irq_tbl) {
		rc = -ENOMEM;
		goto err_free_irqs;
	}

	for (i = 0; i < irqs_demand; i++)
		bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i);

	bd->irqs_acquired = irqs_demand;
	/* Reduce rings based upon the number of vectors allocated.
	 * We don't need to consider NQs as they have been calculated
	 * and must be more than irqs_demand.
	 */
	rc = bnge_adjust_rings(bd, &bd->rx_nr_rings,
			       &bd->tx_nr_rings,
			       irqs_demand - aux_msix, min == 1);
	if (rc)
		goto err_free_irqs;

	tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings);
	bd->nq_nr_rings = (min == 1) ?
		max_t(u16, tx_cp, bd->rx_nr_rings) :
		tx_cp + bd->rx_nr_rings;

	/* Readjust tx_nr_rings_per_tc */
	if (!bd->num_tc)
		bd->tx_nr_rings_per_tc = bd->tx_nr_rings;

	return 0;

err_free_irqs:
	dev_err(bd->dev, "Failed to allocate IRQs err = %d\n", rc);
	bnge_free_irqs(bd);
	return rc;
}

void bnge_free_irqs(struct bnge_dev *bd)
{
	pci_free_irq_vectors(bd->pdev);
	kfree(bd->irq_tbl);
	bd->irq_tbl = NULL;
}

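/* Derive the upper bounds for RX/TX rings and NQs from the hardware
 * resource pools, accounting for aggregation rings, completion-ring
 * limits and ring-group limits.
 */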
static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
				u16 *max_tx, u16 *max_nq)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	u16 max_ring_grps = 0, max_cp;
	int rc;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_nq = min_t(int, bnge_get_max_func_irqs(bd),
			hw_resc->max_stat_ctxs);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (bnge_is_agg_reqd(bd))
		*max_rx >>= 1;

	max_cp = bnge_get_max_func_cp_rings(bd);

	/* Fix RX and TX rings according to number of CPs available */
	rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false);
	if (rc) {
		*max_rx = 0;
		*max_tx = 0;
	}

	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
			      u16 *max_tx, bool shared)
{
	u16 rx, tx, nq;

	_bnge_get_max_rings(bd, &rx, &tx, &nq);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !nq)
		return -ENOMEM;

	return bnge_fix_rings_count(max_rx, max_tx, nq, shared);
}

static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx,
			       bool shared)
{
	int rc;

	rc = bnge_get_max_rings(bd, max_rx, max_tx, shared);
	if (rc) {
		dev_info(bd->dev, "Not enough rings available\n");
		return rc;
	}

	if (bnge_is_roce_en(bd)) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnge_get_max_func_cp_rings(bd);
		max_stat = bnge_get_max_func_stat_ctxs(bd);
		max_irq = bnge_get_max_func_irqs(bd);
		if (max_cp <= BNGE_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNGE_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNGE_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNGE_MIN_ROCE_CP_RINGS;
		max_irq -= BNGE_MIN_ROCE_CP_RINGS;
		max_stat -= BNGE_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(u16, max_cp, max_irq);
		max_cp = min_t(u16, max_cp, max_stat);
		rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}

	return rc;
}

/* In the initial default shared ring setting, each shared ring must
 * have an RX/TX ring pair.
 */
static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd)
{
	bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings);
	bd->rx_nr_rings = bd->nq_nr_rings;
	bd->tx_nr_rings_per_tc = bd->nq_nr_rings;
	bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
}

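/* Pick the default ring counts: start from the ethtool default RSS
 * queue count, cap it by what the hardware allows, then reserve the
 * rings with firmware and retry once with the (possibly reduced)
 * counts it returns.
 */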
static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh)
{
	u16 dflt_rings, max_rx_rings, max_tx_rings;
	int rc;

	if (sh)
		bd->flags |= BNGE_EN_SHARED_CHNL;

	dflt_rings = netif_get_num_default_rss_queues();

	rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings);
	bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings);
	if (sh)
		bnge_trim_dflt_sh_rings(bd);
	else
		bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings;
	bd->tx_nr_rings = bd->tx_nr_rings_per_tc;

	rc = bnge_reserve_rings(bd);
	if (rc && rc != -ENODEV)
		dev_warn(bd->dev, "Unable to reserve tx rings\n");
	bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
	if (sh)
		bnge_trim_dflt_sh_rings(bd);

	/* Rings may have been reduced, reserve them again */
	if (bnge_need_reserve_rings(bd)) {
		rc = bnge_reserve_rings(bd);
		if (rc && rc != -ENODEV)
			dev_warn(bd->dev, "Ring reservation with reduced count failed\n");
		bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
	}
	if (rc) {
		bd->tx_nr_rings = 0;
		bd->rx_nr_rings = 0;
	}

	return rc;
}

static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd)
{
	u16 entries;

	entries = BNGE_MAX_RSS_TABLE_ENTRIES;

	bd->rss_indir_tbl_entries = entries;
	bd->rss_indir_tbl =
		kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL);
	if (!bd->rss_indir_tbl)
		return -ENOMEM;

	return 0;
}

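/* Default netdev bring-up: allocate the RSS indirection table, set
 * up the default (shared) rings, and size the filter table from the
 * flow resources firmware reports.
 */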
int bnge_net_init_dflt_config(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc;
	int rc;

	rc = bnge_alloc_rss_indir_tbl(bd);
	if (rc)
		return rc;

	rc = bnge_net_init_dflt_rings(bd, true);
	if (rc)
		goto err_free_tbl;

	hw_resc = &bd->hw_resc;
	bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
		       BNGE_L2_FLTR_MAX_FLTR;

	return 0;

err_free_tbl:
	kfree(bd->rss_indir_tbl);
	bd->rss_indir_tbl = NULL;
	return rc;
}

void bnge_net_uninit_dflt_config(struct bnge_dev *bd)
{
	kfree(bd->rss_indir_tbl);
	bd->rss_indir_tbl = NULL;
}

void bnge_aux_init_dflt_config(struct bnge_dev *bd)
{
	bd->aux_num_msix = bnge_aux_get_dflt_msix(bd);
	bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd);
}