// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include "bnge.h"
#include "bnge_hwrm.h"
#include "bnge_hwrm_lib.h"
#include "bnge_resc.h"

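/* Convert a TX ring count to completion-ring "chunks": TX rings are
 * spread evenly across the configured traffic classes (at least one),
 * so each chunk covers one TX ring per TC.
 */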
static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx)
{
	u16 tcs = bd->num_tc;

	if (!tcs)
		tcs = 1;

	return tx / tcs;
}

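/* IRQs usable by this function are bounded by both its MSI-X vector
 * and NQ limits.
 */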
static u16 bnge_get_max_func_irqs(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;

	return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs);
}

static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd)
{
	return bd->hw_resc.max_stat_ctxs;
}

static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd)
{
	return bd->hw_resc.max_cp_rings;
}

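/* Default MSI-X budget for the RoCE aux device: one vector per online
 * CPU plus one, capped at BNGE_MAX_ROCE_MSIX.
 */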
static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
{
	int roce_msix = BNGE_MAX_ROCE_MSIX;

	return min_t(int, roce_msix, num_online_cpus() + 1);
}

static u16 bnge_aux_get_msix(struct bnge_dev *bd)
{
	if (bnge_is_roce_en(bd))
		return bd->aux_num_msix;

	return 0;
}

static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num)
{
	if (bnge_is_roce_en(bd))
		bd->aux_num_msix = num;
}

static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd)
{
	if (bnge_is_roce_en(bd))
		return bd->aux_num_stat_ctxs;

	return 0;
}

static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx)
{
	if (bnge_is_roce_en(bd))
		bd->aux_num_stat_ctxs = num_aux_ctx;
}

static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd)
{
	return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd);
}

static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd)
{
	int stat_ctx = 0;

	if (bnge_is_roce_en(bd)) {
		stat_ctx = BNGE_MIN_ROCE_STAT_CTXS;

		if (!bd->pf.port_id && bd->port_count > 1)
			stat_ctx++;
	}

	return stat_ctx;
}

static u16 bnge_nqs_demand(struct bnge_dev *bd)
{
	return bd->nq_nr_rings + bnge_aux_get_msix(bd);
}

static u16 bnge_cprs_demand(struct bnge_dev *bd)
{
	return bd->tx_nr_rings + bd->rx_nr_rings;
}

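/* Clamp a request for @num extra MSI-X vectors to whatever is still
 * available once the L2 NQs are accounted for.
 */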
static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num)
{
	u16 max_irq = bnge_get_max_func_irqs(bd);
	u16 total_demand = bd->nq_nr_rings + num;

	if (max_irq < total_demand) {
		num = max_irq - bd->nq_nr_rings;
		if (num <= 0)
			return 0;
	}

	return num;
}

static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 tx_chunks)
{
	return tx_chunks * bd->num_tc;
}

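/* Fit *rx and *tx into @max rings.  With shared completion rings, RX
 * and TX may each use up to @max; otherwise shrink the larger count
 * until the two together fit.
 */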
int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared)
{
	u16 _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(u16, _rx, max);
		*tx = min_t(u16, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;
		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}

	return 0;
}

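/* TC-aware variant of bnge_fix_rings_count(): when multiple traffic
 * classes are configured, trim in completion-ring chunks so the TX
 * ring count remains a multiple of the number of TCs.
 */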
static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
			     u16 *tx, u16 max_nq, bool sh)
{
	u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx);

	if (tx_chunks != *tx) {
		u16 tx_saved = tx_chunks;
		int rc;

		rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh);
		if (rc)
			return rc;
		if (tx_chunks != tx_saved)
			*tx = bnge_num_cp_to_tx(bd, tx_chunks);
		return 0;
	}

	return bnge_fix_rings_count(rx, tx, max_nq, sh);
}

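/* Number of RSS contexts needed to cover @rx_rings; each context
 * provides BNGE_RSS_TABLE_ENTRIES indirection entries.
 */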
static int bnge_cal_nr_rss_ctxs(u16 rx_rings)
{
	if (!rx_rings)
		return 0;

	return bnge_adjust_pow_two(rx_rings - 1,
				   BNGE_RSS_TABLE_ENTRIES);
}

static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd,
				struct bnge_hw_rings *hwr)
{
	return bnge_cal_nr_rss_ctxs(hwr->grp);
}

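/* Only the default VNIC is used for now, regardless of ring count. */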
static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
{
	return 1;
}

static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
{
	return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
	       BNGE_RSS_TABLE_ENTRIES;
}

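/* Populate the default RSS indirection table: spread entries
 * round-robin across the RX rings and zero out any unused tail.
 */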
static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd)
{
	u16 max_entries, pad;
	u32 *rss_indir_tbl;
	int i;

	max_entries = bnge_get_rxfh_indir_size(bd);
	rss_indir_tbl = &bd->rss_indir_tbl[0];

	for (i = 0; i < max_entries; i++)
		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i,
							      bd->rx_nr_rings);

	pad = bd->rss_indir_tbl_entries - max_entries;
	if (pad)
		memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}

static void bnge_copy_reserved_rings(struct bnge_dev *bd,
				     struct bnge_hw_rings *hwr)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;

	hwr->tx = hw_resc->resv_tx_rings;
	hwr->rx = hw_resc->resv_rx_rings;
	hwr->nq = hw_resc->resv_irqs;
	hwr->cmpl = hw_resc->resv_cp_rings;
	hwr->grp = hw_resc->resv_hw_ring_grps;
	hwr->vnic = hw_resc->resv_vnics;
	hwr->stat = hw_resc->resv_stat_ctxs;
	hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
}

static bool bnge_rings_ok(struct bnge_hw_rings *hwr)
{
	return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&
	       hwr->stat && hwr->cmpl;
}

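/* Return true if the current ring demand (including any aux/RoCE
 * resources) no longer matches what has been reserved.
 */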
static bool bnge_need_reserve_rings(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	u16 cprs = bnge_cprs_demand(bd);
	u16 rx = bd->rx_nr_rings, stat;
	u16 nqs = bnge_nqs_demand(bd);
	u16 vnic;

	if (hw_resc->resv_tx_rings != bd->tx_nr_rings)
		return true;

	vnic = bnge_get_total_vnics(bd, rx);

	if (bnge_is_agg_reqd(bd))
		rx <<= 1;
	stat = bnge_func_stat_ctxs_demand(bd);
	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs ||
	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat)
		return true;
	if (hw_resc->resv_irqs != nqs)
		return true;

	return false;
}

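/* Reserve rings to match current demand.  Computes the NQ, TX, RX,
 * VNIC and stat-context requirements (budgeting aux MSI-X when the aux
 * device is not yet registered), requests them through
 * bnge_hwrm_reserve_rings(), and then trims the driver's ring counts
 * to what was actually granted.
 */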
int bnge_reserve_rings(struct bnge_dev *bd)
{
	u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd);
	struct bnge_hw_rings hwr = {0};
	u16 rx_rings, old_rx_rings;
	u16 nq = bd->nq_nr_rings;
	u16 aux_msix = 0;
	bool sh = false;
	u16 tx_cp;
	int rc;

	if (!bnge_need_reserve_rings(bd))
		return 0;

	if (!bnge_aux_registered(bd)) {
		aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix);
		if (!aux_msix)
			bnge_aux_set_stat_ctxs(bd, 0);

		if (aux_msix > aux_dflt_msix)
			aux_msix = aux_dflt_msix;
		hwr.nq = nq + aux_msix;
	} else {
		hwr.nq = bnge_nqs_demand(bd);
	}

	hwr.tx = bd->tx_nr_rings;
	hwr.rx = bd->rx_nr_rings;
	if (bd->flags & BNGE_EN_SHARED_CHNL)
		sh = true;
	hwr.cmpl = hwr.rx + hwr.tx;

	hwr.vnic = bnge_get_total_vnics(bd, hwr.rx);

	if (bnge_is_agg_reqd(bd))
		hwr.rx <<= 1;
	hwr.grp = bd->rx_nr_rings;
	hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr);
	hwr.stat = bnge_func_stat_ctxs_demand(bd);
	old_rx_rings = bd->hw_resc.resv_rx_rings;

	rc = bnge_hwrm_reserve_rings(bd, &hwr);
	if (rc)
		return rc;

	bnge_copy_reserved_rings(bd, &hwr);

	rx_rings = hwr.rx;
	if (bnge_is_agg_reqd(bd)) {
		if (hwr.rx >= 2)
			rx_rings = hwr.rx >> 1;
		else
			return -ENOMEM;
	}

	rx_rings = min_t(u16, rx_rings, hwr.grp);
	hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);
	if (hwr.stat > bnge_aux_get_stat_ctxs(bd))
		hwr.stat -= bnge_aux_get_stat_ctxs(bd);
	hwr.nq = min_t(u16, hwr.nq, hwr.stat);

	/* Adjust the ring counts to what was granted */
	rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);
	if (bnge_is_agg_reqd(bd))
		hwr.rx = rx_rings << 1;
	tx_cp = hwr.tx;
	hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;
	bd->tx_nr_rings = hwr.tx;

	if (rx_rings != bd->rx_nr_rings)
		dev_warn(bd->dev, "RX rings reservation reduced to %u from %u requested\n",
			 rx_rings, bd->rx_nr_rings);

	bd->rx_nr_rings = rx_rings;
	bd->nq_nr_rings = hwr.nq;

	if (!bnge_rings_ok(&hwr))
		return -ENOMEM;

	if (old_rx_rings != bd->hw_resc.resv_rx_rings)
		bnge_set_dflt_rss_indir_tbl(bd);

	if (!bnge_aux_registered(bd)) {
		u16 resv_msix, resv_ctx, aux_ctxs;
		struct bnge_hw_resc *hw_resc;

		hw_resc = &bd->hw_resc;
		resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings;
		aux_msix = min_t(u16, resv_msix, aux_msix);
		bnge_aux_set_msix_num(bd, aux_msix);
		resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings;
		aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd));
		bnge_aux_set_stat_ctxs(bd, aux_ctxs);
	}

	return rc;
}

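/* Allocate MSI-X vectors for the NQ demand (including the aux budget)
 * and shrink the ring counts if fewer vectors were granted than
 * requested.
 */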
int bnge_alloc_irqs(struct bnge_dev *bd)
{
	u16 aux_msix, tx_cp, num_entries;
	int i, irqs_demand, rc;
	u16 max, min = 1;

	irqs_demand = bnge_nqs_demand(bd);
	max = bnge_get_max_func_irqs(bd);
	if (irqs_demand > max)
		irqs_demand = max;

	if (!(bd->flags & BNGE_EN_SHARED_CHNL))
		min = 2;

	irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand,
					    PCI_IRQ_MSIX);
	aux_msix = bnge_aux_get_msix(bd);
	if (irqs_demand < 0 || irqs_demand < aux_msix) {
		rc = -ENODEV;
		goto err_free_irqs;
	}

	num_entries = irqs_demand;
	if (pci_msix_can_alloc_dyn(bd->pdev))
		num_entries = max;
	bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL);
	if (!bd->irq_tbl) {
		rc = -ENOMEM;
		goto err_free_irqs;
	}

	for (i = 0; i < irqs_demand; i++)
		bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i);

	bd->irqs_acquired = irqs_demand;
	/* Reduce rings based upon the number of vectors allocated.
	 * We don't need to consider NQs as they have already been
	 * calculated and must be at least irqs_demand.
	 */
	rc = bnge_adjust_rings(bd, &bd->rx_nr_rings,
			       &bd->tx_nr_rings,
			       irqs_demand - aux_msix, min == 1);
	if (rc)
		goto err_free_irqs;

	tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings);
	bd->nq_nr_rings = (min == 1) ?
		max_t(u16, tx_cp, bd->rx_nr_rings) :
		tx_cp + bd->rx_nr_rings;

	/* Readjust tx_nr_rings_per_tc */
	if (!bd->num_tc)
		bd->tx_nr_rings_per_tc = bd->tx_nr_rings;

	return 0;

err_free_irqs:
	dev_err(bd->dev, "Failed to allocate IRQs, err = %d\n", rc);
	bnge_free_irqs(bd);
	return rc;
}

void bnge_free_irqs(struct bnge_dev *bd)
{
	pci_free_irq_vectors(bd->pdev);
	kfree(bd->irq_tbl);
	bd->irq_tbl = NULL;
}

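/* Compute the upper bounds on RX/TX/NQ ring counts from this
 * function's hardware resource maxima.
 */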
static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
				u16 *max_tx, u16 *max_nq)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	u16 max_ring_grps = 0, max_cp;
	int rc;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_nq = min_t(int, bnge_get_max_func_irqs(bd),
			hw_resc->max_stat_ctxs);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (bnge_is_agg_reqd(bd))
		*max_rx >>= 1;

	max_cp = bnge_get_max_func_cp_rings(bd);

	/* Fix RX and TX rings according to number of CPs available */
	rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false);
	if (rc) {
		*max_rx = 0;
		*max_tx = 0;
	}

	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
			      u16 *max_tx, bool shared)
{
	u16 rx, tx, nq;

	_bnge_get_max_rings(bd, &rx, &tx, &nq);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !nq)
		return -ENOMEM;

	return bnge_fix_rings_count(max_rx, max_tx, nq, shared);
}

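/* Compute the default RX/TX maxima, carving out the minimum CP, IRQ
 * and stat-context resources needed by RoCE when it is enabled.
 */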
static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx,
			       bool shared)
{
	int rc;

	rc = bnge_get_max_rings(bd, max_rx, max_tx, shared);
	if (rc) {
		dev_info(bd->dev, "Not enough rings available\n");
		return rc;
	}

	if (bnge_is_roce_en(bd)) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnge_get_max_func_cp_rings(bd);
		max_stat = bnge_get_max_func_stat_ctxs(bd);
		max_irq = bnge_get_max_func_irqs(bd);
		if (max_cp <= BNGE_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNGE_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNGE_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNGE_MIN_ROCE_CP_RINGS;
		max_irq -= BNGE_MIN_ROCE_CP_RINGS;
		max_stat -= BNGE_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(u16, max_cp, max_irq);
		max_cp = min_t(u16, max_cp, max_stat);
		rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;	/* RoCE trimming is best effort */
	}

	return rc;
}

/* In the initial default shared ring setting, each shared ring must have
 * an RX/TX ring pair.
 */
static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd)
{
	bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings);
	bd->rx_nr_rings = bd->nq_nr_rings;
	bd->tx_nr_rings_per_tc = bd->nq_nr_rings;
	bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
}

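/* Pick default ring counts from the RSS queue default and the hardware
 * maxima, then reserve them, retrying once if the first reservation
 * came back with fewer rings.
 */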
static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh)
{
	u16 dflt_rings, max_rx_rings, max_tx_rings;
	int rc;

	if (sh)
		bd->flags |= BNGE_EN_SHARED_CHNL;

	dflt_rings = netif_get_num_default_rss_queues();

	rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings);
	bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings);
	if (sh)
		bnge_trim_dflt_sh_rings(bd);
	else
		bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings;
	bd->tx_nr_rings = bd->tx_nr_rings_per_tc;

	rc = bnge_reserve_rings(bd);
	if (rc && rc != -ENODEV)
		dev_warn(bd->dev, "Unable to reserve tx rings\n");
	bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
	if (sh)
		bnge_trim_dflt_sh_rings(bd);

	/* Rings may have been reduced, re-reserve them */
	if (bnge_need_reserve_rings(bd)) {
		rc = bnge_reserve_rings(bd);
		if (rc && rc != -ENODEV)
			dev_warn(bd->dev, "Reduced ring reservation failed\n");
		bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
	}
	if (rc) {
		bd->tx_nr_rings = 0;
		bd->rx_nr_rings = 0;
	}

	return rc;
}

static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd)
{
	u16 entries;

	entries = BNGE_MAX_RSS_TABLE_ENTRIES;

	bd->rss_indir_tbl_entries = entries;
	bd->rss_indir_tbl =
		kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL);
	if (!bd->rss_indir_tbl)
		return -ENOMEM;

	return 0;
}

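/* Default netdev initialization: allocate the RSS indirection table,
 * set up default (shared) rings, and size the filter table from the
 * flow resources reported by the hardware.
 */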
int bnge_net_init_dflt_config(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc;
	int rc;

	rc = bnge_alloc_rss_indir_tbl(bd);
	if (rc)
		return rc;

	rc = bnge_net_init_dflt_rings(bd, true);
	if (rc)
		goto err_free_tbl;

	hw_resc = &bd->hw_resc;
	bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
		       BNGE_L2_FLTR_MAX_FLTR;

	return 0;

err_free_tbl:
	kfree(bd->rss_indir_tbl);
	bd->rss_indir_tbl = NULL;
	return rc;
}

void bnge_net_uninit_dflt_config(struct bnge_dev *bd)
{
	kfree(bd->rss_indir_tbl);
	bd->rss_indir_tbl = NULL;
}

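/* Seed the aux (RoCE) device with its default MSI-X and stat-context
 * budgets; bnge_reserve_rings() may trim these later.
 */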
void bnge_aux_init_dflt_config(struct bnge_dev *bd)
{
	bd->aux_num_msix = bnge_aux_get_dflt_msix(bd);
	bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd);
}