xref: /linux/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2025 Broadcom.
3 
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
#include <net/netdev_queues.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
22 
23 #include "bnge.h"
24 #include "bnge_hwrm_lib.h"
25 #include "bnge_ethtool.h"
26 #include "bnge_rmem.h"
27 #include "bnge_txrx.h"
28 
/* Offset of TX ring @tx within its traffic class (TC). */
#define BNGE_RING_TO_TC_OFF(bd, tx)	\
	((tx) % (bd)->tx_nr_rings_per_tc)

/* Traffic class that TX ring @tx belongs to. */
#define BNGE_RING_TO_TC(bd, tx)		\
	((tx) / (bd)->tx_nr_rings_per_tc)

/* Index of the first TX ring of traffic class @tc. */
#define BNGE_TC_TO_RING_BASE(bd, tc)	\
	((tc) * (bd)->tx_nr_rings_per_tc)
37 
38 static void bnge_free_stats_mem(struct bnge_net *bn,
39 				struct bnge_stats_mem *stats)
40 {
41 	struct bnge_dev *bd = bn->bd;
42 
43 	if (stats->hw_stats) {
44 		dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
45 				  stats->hw_stats_map);
46 		stats->hw_stats = NULL;
47 	}
48 }
49 
50 static int bnge_alloc_stats_mem(struct bnge_net *bn,
51 				struct bnge_stats_mem *stats)
52 {
53 	struct bnge_dev *bd = bn->bd;
54 
55 	stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
56 					     &stats->hw_stats_map, GFP_KERNEL);
57 	if (!stats->hw_stats)
58 		return -ENOMEM;
59 
60 	return 0;
61 }
62 
63 static void bnge_free_ring_stats(struct bnge_net *bn)
64 {
65 	struct bnge_dev *bd = bn->bd;
66 	int i;
67 
68 	if (!bn->bnapi)
69 		return;
70 
71 	for (i = 0; i < bd->nq_nr_rings; i++) {
72 		struct bnge_napi *bnapi = bn->bnapi[i];
73 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
74 
75 		bnge_free_stats_mem(bn, &nqr->stats);
76 	}
77 }
78 
79 static int bnge_alloc_ring_stats(struct bnge_net *bn)
80 {
81 	struct bnge_dev *bd = bn->bd;
82 	u32 size, i;
83 	int rc;
84 
85 	size = bd->hw_ring_stats_size;
86 
87 	for (i = 0; i < bd->nq_nr_rings; i++) {
88 		struct bnge_napi *bnapi = bn->bnapi[i];
89 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
90 
91 		nqr->stats.len = size;
92 		rc = bnge_alloc_stats_mem(bn, &nqr->stats);
93 		if (rc)
94 			goto err_free_ring_stats;
95 
96 		nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
97 	}
98 	return 0;
99 
100 err_free_ring_stats:
101 	bnge_free_ring_stats(bn);
102 	return rc;
103 }
104 
105 static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
106 {
107 	struct bnge_ring_struct *ring = &nqr->ring_struct;
108 
109 	kfree(nqr->desc_ring);
110 	nqr->desc_ring = NULL;
111 	ring->ring_mem.pg_arr = NULL;
112 	kfree(nqr->desc_mapping);
113 	nqr->desc_mapping = NULL;
114 	ring->ring_mem.dma_arr = NULL;
115 }
116 
117 static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
118 {
119 	struct bnge_ring_struct *ring = &cpr->ring_struct;
120 
121 	kfree(cpr->desc_ring);
122 	cpr->desc_ring = NULL;
123 	ring->ring_mem.pg_arr = NULL;
124 	kfree(cpr->desc_mapping);
125 	cpr->desc_mapping = NULL;
126 	ring->ring_mem.dma_arr = NULL;
127 }
128 
129 static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
130 {
131 	nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
132 	if (!nqr->desc_ring)
133 		return -ENOMEM;
134 
135 	nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
136 	if (!nqr->desc_mapping)
137 		goto err_free_desc_ring;
138 	return 0;
139 
140 err_free_desc_ring:
141 	kfree(nqr->desc_ring);
142 	nqr->desc_ring = NULL;
143 	return -ENOMEM;
144 }
145 
146 static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
147 {
148 	cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
149 	if (!cpr->desc_ring)
150 		return -ENOMEM;
151 
152 	cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
153 	if (!cpr->desc_mapping)
154 		goto err_free_desc_ring;
155 	return 0;
156 
157 err_free_desc_ring:
158 	kfree(cpr->desc_ring);
159 	cpr->desc_ring = NULL;
160 	return -ENOMEM;
161 }
162 
163 static void bnge_free_nq_arrays(struct bnge_net *bn)
164 {
165 	struct bnge_dev *bd = bn->bd;
166 	int i;
167 
168 	for (i = 0; i < bd->nq_nr_rings; i++) {
169 		struct bnge_napi *bnapi = bn->bnapi[i];
170 
171 		bnge_free_nq_desc_arr(&bnapi->nq_ring);
172 	}
173 }
174 
175 static int bnge_alloc_nq_arrays(struct bnge_net *bn)
176 {
177 	struct bnge_dev *bd = bn->bd;
178 	int i, rc;
179 
180 	for (i = 0; i < bd->nq_nr_rings; i++) {
181 		struct bnge_napi *bnapi = bn->bnapi[i];
182 
183 		rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
184 		if (rc)
185 			goto err_free_nq_arrays;
186 	}
187 	return 0;
188 
189 err_free_nq_arrays:
190 	bnge_free_nq_arrays(bn);
191 	return rc;
192 }
193 
/* Free the NQ ring memory and, underneath each NQ, its child completion
 * rings (ring pages plus descriptor arrays).  Safe against partial
 * allocation: cp_ring_arr may be absent, and the free helpers tolerate
 * entries that were never fully set up.
 */
static void bnge_free_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		nqr = &bnapi->nq_ring;
		ring = &nqr->ring_struct;

		/* Free the NQ's own ring pages first. */
		bnge_free_ring(bd, &ring->ring_mem);

		if (!nqr->cp_ring_arr)
			continue;

		/* Then every child completion ring hanging off this NQ. */
		for (j = 0; j < nqr->cp_ring_count; j++) {
			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];

			ring = &cpr->ring_struct;
			bnge_free_ring(bd, &ring->ring_mem);
			bnge_free_cp_desc_arr(cpr);
		}
		kfree(nqr->cp_ring_arr);
		nqr->cp_ring_arr = NULL;
		nqr->cp_ring_count = 0;
	}
}
225 
226 static int alloc_one_cp_ring(struct bnge_net *bn,
227 			     struct bnge_cp_ring_info *cpr)
228 {
229 	struct bnge_ring_mem_info *rmem;
230 	struct bnge_ring_struct *ring;
231 	struct bnge_dev *bd = bn->bd;
232 	int rc;
233 
234 	rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
235 	if (rc)
236 		return -ENOMEM;
237 	ring = &cpr->ring_struct;
238 	rmem = &ring->ring_mem;
239 	rmem->nr_pages = bn->cp_nr_pages;
240 	rmem->page_size = HW_CMPD_RING_SIZE;
241 	rmem->pg_arr = (void **)cpr->desc_ring;
242 	rmem->dma_arr = cpr->desc_mapping;
243 	rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
244 	rc = bnge_alloc_ring(bd, rmem);
245 	if (rc)
246 		goto err_free_cp_desc_arr;
247 	return rc;
248 
249 err_free_cp_desc_arr:
250 	bnge_free_cp_desc_arr(cpr);
251 	return rc;
252 }
253 
/* Build the NQ tree: one notification queue per MSI-X vector, each
 * owning up to (1 RX + tcs TX) child completion rings, and wire the
 * child CP rings to their RX/TX rings.  Returns 0 or a negative errno
 * (all partial allocations are released via bnge_free_nq_tree()).
 */
static int bnge_alloc_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, ulp_msix, rc;
	int tcs = 1;	/* single traffic class for now */

	/* MSI-X vectors below this base belong to the aux/ULP device. */
	ulp_msix = bnge_aux_get_msix(bd);
	for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
		bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		int cp_count = 0, k;
		int rx = 0, tx = 0;

		nqr = &bnapi->nq_ring;
		nqr->bnapi = bnapi;
		ring = &nqr->ring_struct;

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_nq_tree;

		ring->map_idx = ulp_msix + i;

		/* First rx_nr_rings NQs each service one RX ring. */
		if (i < bd->rx_nr_rings) {
			cp_count++;
			rx = 1;
		}

		/* Shared channel: RX and TX share the low NQs.
		 * Separate channels: TX NQs start after the RX NQs.
		 */
		if ((sh && i < bd->tx_nr_rings) ||
		    (!sh && i >= bd->rx_nr_rings)) {
			cp_count += tcs;
			tx = 1;
		}

		nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
					   GFP_KERNEL);
		if (!nqr->cp_ring_arr) {
			rc = -ENOMEM;
			goto err_free_nq_tree;
		}

		nqr->cp_ring_count = cp_count;

		for (k = 0; k < cp_count; k++) {
			cpr = &nqr->cp_ring_arr[k];
			rc = alloc_one_cp_ring(bn, cpr);
			if (rc)
				goto err_free_nq_tree;

			cpr->bnapi = bnapi;
			cpr->cp_idx = k;
			/* Child 0 is the RX CP ring (when this NQ has RX);
			 * remaining children are TX CP rings, one per TC.
			 */
			if (!k && rx) {
				bn->rx_ring[i].rx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
			} else {
				int n, tc = k - rx;

				/* j counts TX-capable NQs seen so far. */
				n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
				bn->tx_ring[n].tx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
			}
		}
		if (tx)
			j++;
	}
	return 0;

err_free_nq_tree:
	bnge_free_nq_tree(bn);
	return rc;
}
328 
329 static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
330 {
331 	return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
332 }
333 
334 static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
335 				       struct bnge_rx_ring_info *rxr)
336 {
337 	int i, max_idx;
338 
339 	if (!rxr->rx_buf_ring)
340 		return;
341 
342 	max_idx = bn->rx_nr_pages * RX_DESC_CNT;
343 
344 	for (i = 0; i < max_idx; i++) {
345 		struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
346 		void *data = rx_buf->data;
347 
348 		if (!data)
349 			continue;
350 
351 		rx_buf->data = NULL;
352 		page_pool_free_va(rxr->head_pool, data, true);
353 	}
354 }
355 
356 static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
357 					struct bnge_rx_ring_info *rxr)
358 {
359 	int i, max_idx;
360 
361 	if (!rxr->rx_agg_buf_ring)
362 		return;
363 
364 	max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
365 
366 	for (i = 0; i < max_idx; i++) {
367 		struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
368 		netmem_ref netmem = rx_agg_buf->netmem;
369 
370 		if (!netmem)
371 			continue;
372 
373 		rx_agg_buf->netmem = 0;
374 		__clear_bit(i, rxr->rx_agg_bmap);
375 
376 		page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
377 	}
378 }
379 
380 static void bnge_free_one_tpa_info_data(struct bnge_net *bn,
381 					struct bnge_rx_ring_info *rxr)
382 {
383 	int i;
384 
385 	for (i = 0; i < bn->max_tpa; i++) {
386 		struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i];
387 		u8 *data = tpa_info->data;
388 
389 		if (!data)
390 			continue;
391 
392 		tpa_info->data = NULL;
393 		page_pool_free_va(rxr->head_pool, data, false);
394 	}
395 }
396 
397 static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
398 					    struct bnge_rx_ring_info *rxr)
399 {
400 	struct bnge_tpa_idx_map *map;
401 
402 	if (rxr->rx_tpa)
403 		bnge_free_one_tpa_info_data(bn, rxr);
404 
405 	bnge_free_one_rx_ring_bufs(bn, rxr);
406 	bnge_free_one_agg_ring_bufs(bn, rxr);
407 
408 	map = rxr->rx_tpa_idx_map;
409 	if (map)
410 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
411 }
412 
413 static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
414 {
415 	struct bnge_dev *bd = bn->bd;
416 	int i;
417 
418 	if (!bn->rx_ring)
419 		return;
420 
421 	for (i = 0; i < bd->rx_nr_rings; i++)
422 		bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
423 }
424 
/* Unmap and free every skb still queued on the TX rings, then reset the
 * BQL state of each TX queue.  Used on teardown while the rings are
 * quiesced.
 */
static void bnge_free_tx_skbs(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	u16 max_idx;
	int i;

	max_idx = bn->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		int j;

		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < max_idx;) {
			struct bnge_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb;
			int k, last;

			skb = tx_buf->skb;
			if (!skb) {
				/* Empty slot, advance one descriptor. */
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			/* Head fragment was mapped with dma_map_single(). */
			dma_unmap_single(bd->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			/* Skip two BDs for the packet head (the head buffer
			 * occupies a pair of descriptors — presumably data +
			 * extended BD; confirm against the TX path), then one
			 * BD per page fragment.
			 */
			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bn->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(bd->dev,
					       dma_unmap_addr(tx_buf, mapping),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bd->netdev, i));
	}
}
474 
/* Drop all posted buffers: RX/agg/TPA first, then in-flight TX skbs. */
static void bnge_free_all_rings_bufs(struct bnge_net *bn)
{
	bnge_free_rx_ring_pair_bufs(bn);
	bnge_free_tx_skbs(bn);
}
480 
481 static void bnge_free_tpa_info(struct bnge_net *bn)
482 {
483 	struct bnge_dev *bd = bn->bd;
484 	int i, j;
485 
486 	for (i = 0; i < bd->rx_nr_rings; i++) {
487 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
488 
489 		kfree(rxr->rx_tpa_idx_map);
490 		rxr->rx_tpa_idx_map = NULL;
491 		if (rxr->rx_tpa) {
492 			for (j = 0; j < bn->max_tpa; j++) {
493 				kfree(rxr->rx_tpa[j].agg_arr);
494 				rxr->rx_tpa[j].agg_arr = NULL;
495 			}
496 		}
497 		kfree(rxr->rx_tpa);
498 		rxr->rx_tpa = NULL;
499 	}
500 }
501 
/* Allocate per-RX-ring TPA (hardware GRO/LRO) tracking state: bn->max_tpa
 * bnge_tpa_info slots, a per-slot aggregation-completion staging array,
 * and one agg-ID bitmap per ring.  No-op when the device reports no TPA
 * v2 support.  Returns 0 or -ENOMEM; partial allocations are unwound.
 */
static int bnge_alloc_tpa_info(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j;

	if (!bd->max_tpa_v2)
		return 0;

	/* Use the larger of the firmware-advertised limit and MAX_TPA. */
	bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA);
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_tpa = kcalloc(bn->max_tpa, sizeof(struct bnge_tpa_info),
				      GFP_KERNEL);
		if (!rxr->rx_tpa)
			goto err_free_tpa_info;

		/* Staging space for up to MAX_SKB_FRAGS agg completions
		 * per in-flight TPA aggregation.
		 */
		for (j = 0; j < bn->max_tpa; j++) {
			struct rx_agg_cmp *agg;

			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
			if (!agg)
				goto err_free_tpa_info;
			rxr->rx_tpa[j].agg_arr = agg;
		}
		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
					      GFP_KERNEL);
		if (!rxr->rx_tpa_idx_map)
			goto err_free_tpa_info;
	}
	return 0;

err_free_tpa_info:
	bnge_free_tpa_info(bn);
	return -ENOMEM;
}
538 
539 static void bnge_free_rx_rings(struct bnge_net *bn)
540 {
541 	struct bnge_dev *bd = bn->bd;
542 	int i;
543 
544 	bnge_free_tpa_info(bn);
545 	for (i = 0; i < bd->rx_nr_rings; i++) {
546 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
547 		struct bnge_ring_struct *ring;
548 
549 		page_pool_destroy(rxr->page_pool);
550 		page_pool_destroy(rxr->head_pool);
551 		rxr->page_pool = rxr->head_pool = NULL;
552 
553 		kfree(rxr->rx_agg_bmap);
554 		rxr->rx_agg_bmap = NULL;
555 
556 		ring = &rxr->rx_ring_struct;
557 		bnge_free_ring(bd, &ring->ring_mem);
558 
559 		ring = &rxr->rx_agg_ring_struct;
560 		bnge_free_ring(bd, &ring->ring_mem);
561 	}
562 }
563 
/* Create the page pool(s) backing one RX ring.
 *
 * The main pool supplies aggregation buffers.  When a separate header
 * pool is required (pool hands out unreadable/device memory, or
 * PAGE_SIZE exceeds BNGE_RX_PAGE_SIZE), a second, smaller pool is
 * created for headers; otherwise head_pool aliases the main pool with
 * an extra reference so both can be destroyed symmetrically.
 *
 * @numa_node: preferred NUMA node for page allocations.
 * Returns 0 or the PTR_ERR() of the failed page_pool_create().
 */
static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr,
				   int numa_node)
{
	const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
	struct page_pool_params pp = { 0 };
	struct bnge_dev *bd = bn->bd;
	struct page_pool *pool;

	/* Scale pool size down when multiple RX pages fit in one page. */
	pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
	pp.nid = numa_node;
	pp.netdev = bn->netdev;
	pp.dev = bd->dev;
	pp.dma_dir = bn->rx_dir;
	pp.max_len = PAGE_SIZE;
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
	pp.queue_idx = rxr->bnapi->index;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);
	rxr->page_pool = pool;

	rxr->need_head_pool = page_pool_is_unreadable(pool);
	if (bnge_separate_head_pool(rxr)) {
		/* Headers must be host-readable; cap the pool size. */
		pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pool = page_pool_create(&pp);
		if (IS_ERR(pool))
			goto err_destroy_pp;
	} else {
		/* Share the main pool; extra ref balances the later
		 * page_pool_destroy() of head_pool.
		 */
		page_pool_get(pool);
	}
	rxr->head_pool = pool;
	return 0;

err_destroy_pp:
	page_pool_destroy(rxr->page_pool);
	rxr->page_pool = NULL;
	return PTR_ERR(pool);
}
607 
608 static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
609 {
610 	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
611 	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
612 }
613 
614 static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
615 				  struct bnge_rx_ring_info *rxr)
616 {
617 	u16 mem_size;
618 
619 	rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
620 	mem_size = rxr->rx_agg_bmap_size / 8;
621 	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
622 	if (!rxr->rx_agg_bmap)
623 		return -ENOMEM;
624 
625 	return 0;
626 }
627 
/* Allocate everything per RX ring: page pools (NUMA-spread across the
 * device's node), RX ring memory, and — when aggregation is required —
 * the agg ring memory and its occupancy bitmap; finally the TPA state
 * when TPA is enabled.  Returns 0 or a negative errno with all partial
 * allocations released.
 */
static int bnge_alloc_rx_rings(struct bnge_net *bn)
{
	int i, rc = 0, agg_rings = 0, cpu;
	struct bnge_dev *bd = bn->bd;

	if (bnge_is_agg_reqd(bd))
		agg_rings = 1;

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_ring_struct *ring;
		int cpu_node;

		ring = &rxr->rx_ring_struct;

		/* Spread pools across CPUs close to the device's node. */
		cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
		cpu_node = cpu_to_node(cpu);
		netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
			   i, cpu_node);
		rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
		if (rc)
			goto err_free_rx_rings;
		bnge_enable_rx_page_pool(rxr);

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_rx_rings;

		ring->grp_idx = i;
		if (agg_rings) {
			ring = &rxr->rx_agg_ring_struct;
			rc = bnge_alloc_ring(bd, &ring->ring_mem);
			if (rc)
				goto err_free_rx_rings;

			ring->grp_idx = i;
			rc = bnge_alloc_rx_agg_bmap(bn, rxr);
			if (rc)
				goto err_free_rx_rings;
		}
	}

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_alloc_tpa_info(bn);
		if (rc)
			goto err_free_rx_rings;
	}
	return rc;

err_free_rx_rings:
	bnge_free_rx_rings(bn);
	return rc;
}
681 
682 static void bnge_free_tx_rings(struct bnge_net *bn)
683 {
684 	struct bnge_dev *bd = bn->bd;
685 	int i;
686 
687 	for (i = 0; i < bd->tx_nr_rings; i++) {
688 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
689 		struct bnge_ring_struct *ring;
690 
691 		ring = &txr->tx_ring_struct;
692 
693 		bnge_free_ring(bd, &ring->ring_mem);
694 	}
695 }
696 
697 static int bnge_alloc_tx_rings(struct bnge_net *bn)
698 {
699 	struct bnge_dev *bd = bn->bd;
700 	int i, j, rc;
701 
702 	for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
703 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
704 		struct bnge_ring_struct *ring;
705 		u8 qidx;
706 
707 		ring = &txr->tx_ring_struct;
708 
709 		rc = bnge_alloc_ring(bd, &ring->ring_mem);
710 		if (rc)
711 			goto err_free_tx_rings;
712 
713 		ring->grp_idx = txr->bnapi->index;
714 		qidx = bd->tc_to_qidx[j];
715 		ring->queue_id = bd->q_info[qidx].queue_id;
716 		if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
717 			j++;
718 	}
719 	return 0;
720 
721 err_free_tx_rings:
722 	bnge_free_tx_rings(bn);
723 	return rc;
724 }
725 
726 static void bnge_free_vnic_attributes(struct bnge_net *bn)
727 {
728 	struct pci_dev *pdev = bn->bd->pdev;
729 	struct bnge_vnic_info *vnic;
730 	int i;
731 
732 	if (!bn->vnic_info)
733 		return;
734 
735 	for (i = 0; i < bn->nr_vnics; i++) {
736 		vnic = &bn->vnic_info[i];
737 
738 		kfree(vnic->uc_list);
739 		vnic->uc_list = NULL;
740 
741 		if (vnic->mc_list) {
742 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
743 					  vnic->mc_list, vnic->mc_list_mapping);
744 			vnic->mc_list = NULL;
745 		}
746 
747 		if (vnic->rss_table) {
748 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
749 					  vnic->rss_table,
750 					  vnic->rss_table_dma_addr);
751 			vnic->rss_table = NULL;
752 		}
753 
754 		vnic->rss_hash_key = NULL;
755 		vnic->flags = 0;
756 	}
757 }
758 
/* Allocate per-VNIC attribute memory: a unicast address list (when the
 * VNIC filters unicast), a DMA-coherent multicast list (when it filters
 * multicast), and a DMA-coherent RSS table with the hash key placed
 * directly after it in the same allocation.  Returns 0 or -ENOMEM with
 * partial allocations released.
 */
static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_vnic_info *vnic;
	int i, size;

	for (i = 0; i < bn->nr_vnics; i++) {
		vnic = &bn->vnic_info[i];

		if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
			/* One slot fewer than the max — presumably the
			 * primary MAC is tracked elsewhere; confirm
			 * against the filter-programming code.
			 */
			int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;

			vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
			if (!vnic->uc_list)
				goto err_free_vnic_attributes;
		}

		if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(bd->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list)
				goto err_free_vnic_attributes;
		}

		/* Allocate rss table and hash key */
		size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);

		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
		vnic->rss_table = dma_alloc_coherent(bd->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table)
			goto err_free_vnic_attributes;

		/* Hash key lives right after the (aligned) RSS table. */
		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

err_free_vnic_attributes:
	bnge_free_vnic_attributes(bn);
	return -ENOMEM;
}
807 
808 static int bnge_alloc_vnics(struct bnge_net *bn)
809 {
810 	int num_vnics;
811 
812 	/* Allocate only 1 VNIC for now
813 	 * Additional VNICs will be added based on RFS/NTUPLE in future patches
814 	 */
815 	num_vnics = 1;
816 
817 	bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
818 				GFP_KERNEL);
819 	if (!bn->vnic_info)
820 		return -ENOMEM;
821 
822 	bn->nr_vnics = num_vnics;
823 
824 	return 0;
825 }
826 
827 static void bnge_free_vnics(struct bnge_net *bn)
828 {
829 	kfree(bn->vnic_info);
830 	bn->vnic_info = NULL;
831 	bn->nr_vnics = 0;
832 }
833 
834 static void bnge_free_ring_grps(struct bnge_net *bn)
835 {
836 	kfree(bn->grp_info);
837 	bn->grp_info = NULL;
838 }
839 
840 static int bnge_init_ring_grps(struct bnge_net *bn)
841 {
842 	struct bnge_dev *bd = bn->bd;
843 	int i;
844 
845 	bn->grp_info = kcalloc(bd->nq_nr_rings,
846 			       sizeof(struct bnge_ring_grp_info),
847 			       GFP_KERNEL);
848 	if (!bn->grp_info)
849 		return -ENOMEM;
850 	for (i = 0; i < bd->nq_nr_rings; i++) {
851 		bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
852 		bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
853 		bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
854 		bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
855 		bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
856 	}
857 
858 	return 0;
859 }
860 
/* Tear down everything bnge_alloc_core() set up, in roughly reverse
 * dependency order, and finally the top-level arrays.  Also used on
 * bnge_alloc_core() error paths, so the individual helpers must cope
 * with partially-allocated state.
 */
static void bnge_free_core(struct bnge_net *bn)
{
	bnge_free_vnic_attributes(bn);
	bnge_free_tx_rings(bn);
	bnge_free_rx_rings(bn);
	bnge_free_nq_tree(bn);
	bnge_free_nq_arrays(bn);
	bnge_free_ring_stats(bn);
	bnge_free_ring_grps(bn);
	bnge_free_vnics(bn);
	kfree(bn->tx_ring_map);
	bn->tx_ring_map = NULL;
	kfree(bn->tx_ring);
	bn->tx_ring = NULL;
	kfree(bn->rx_ring);
	bn->rx_ring = NULL;
	kfree(bn->bnapi);
	bn->bnapi = NULL;
}
880 
/* Allocate and wire up the driver's core data structures: the NAPI
 * containers, RX/TX ring info arrays, per-ring stats, VNICs, NQ
 * descriptor arrays, ring memory, the NQ/CP tree, and VNIC attributes.
 * Returns 0, or a negative errno after bnge_free_core() has unwound
 * whatever was allocated.
 */
static int bnge_alloc_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, size, arr_size;
	int rc = -ENOMEM;
	void *bnapi;

	/* One allocation holds the pointer array followed by all
	 * bnge_napi structs, each cacheline-aligned.
	 */
	arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
			bd->nq_nr_rings);
	size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
	bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
	if (!bnapi)
		return rc;

	bn->bnapi = bnapi;
	bnapi += arr_size;
	for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
		struct bnge_nq_ring_info *nqr;

		bn->bnapi[i] = bnapi;
		bn->bnapi[i]->index = i;
		bn->bnapi[i]->bn = bn;
		nqr = &bn->bnapi[i]->nq_ring;
		nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
	}

	bn->rx_ring = kcalloc(bd->rx_nr_rings,
			      sizeof(struct bnge_rx_ring_info),
			      GFP_KERNEL);
	if (!bn->rx_ring)
		goto err_free_core;

	/* RX ring i is serviced by NAPI instance i. */
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		rxr->rx_agg_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		rxr->bnapi = bn->bnapi[i];
		bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
	}

	bn->tx_ring = kcalloc(bd->tx_nr_rings,
			      sizeof(struct bnge_tx_ring_info),
			      GFP_KERNEL);
	if (!bn->tx_ring)
		goto err_free_core;

	bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
				  GFP_KERNEL);
	if (!bn->tx_ring_map)
		goto err_free_core;

	/* Shared channel: TX shares the RX NAPIs starting at 0;
	 * otherwise TX NAPIs start after the RX ones.
	 */
	if (bd->flags & BNGE_EN_SHARED_CHNL)
		j = 0;
	else
		j = bd->rx_nr_rings;

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_napi *bnapi2;
		int k;

		txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
		bn->tx_ring_map[i] = i;
		/* NAPI index: base plus this ring's offset within its TC. */
		k = j + BNGE_RING_TO_TC_OFF(bd, i);

		bnapi2 = bn->bnapi[k];
		txr->txq_index = i;
		txr->tx_napi_idx =
			BNGE_RING_TO_TC(bd, txr->txq_index);
		bnapi2->tx_ring[txr->tx_napi_idx] = txr;
		txr->bnapi = bnapi2;
	}

	rc = bnge_alloc_ring_stats(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_vnics(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_arrays(bn);
	if (rc)
		goto err_free_core;

	bnge_init_ring_struct(bn);

	rc = bnge_alloc_rx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_tx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_tree(bn);
	if (rc)
		goto err_free_core;

	bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
						  BNGE_VNIC_MCAST_FLAG |
						  BNGE_VNIC_UCAST_FLAG;
	rc = bnge_alloc_vnic_attributes(bn);
	if (rc)
		goto err_free_core;
	return 0;

err_free_core:
	bnge_free_core(bn);
	return rc;
}
995 
996 u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
997 {
998 	return rxr->rx_cpr->ring_struct.fw_ring_id;
999 }
1000 
1001 u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
1002 {
1003 	return txr->tx_cpr->ring_struct.fw_ring_id;
1004 }
1005 
1006 static void bnge_db_nq_arm(struct bnge_net *bn,
1007 			   struct bnge_db_info *db, u32 idx)
1008 {
1009 	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_ARM |
1010 		    DB_RING_IDX(db, idx), db->doorbell);
1011 }
1012 
1013 static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
1014 {
1015 	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
1016 		    DB_RING_IDX(db, idx), db->doorbell);
1017 }
1018 
1019 static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
1020 {
1021 	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
1022 		    DB_RING_IDX(db, idx), db->doorbell);
1023 }
1024 
1025 static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
1026 {
1027 	struct bnge_napi *bnapi = bn->bnapi[n];
1028 	struct bnge_nq_ring_info *nqr;
1029 
1030 	nqr = &bnapi->nq_ring;
1031 
1032 	return nqr->ring_struct.map_idx;
1033 }
1034 
1035 static void bnge_init_nq_tree(struct bnge_net *bn)
1036 {
1037 	struct bnge_dev *bd = bn->bd;
1038 	int i, j;
1039 
1040 	for (i = 0; i < bd->nq_nr_rings; i++) {
1041 		struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
1042 		struct bnge_ring_struct *ring = &nqr->ring_struct;
1043 
1044 		ring->fw_ring_id = INVALID_HW_RING_ID;
1045 		for (j = 0; j < nqr->cp_ring_count; j++) {
1046 			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
1047 
1048 			ring = &cpr->ring_struct;
1049 			ring->fw_ring_id = INVALID_HW_RING_ID;
1050 		}
1051 	}
1052 }
1053 
/* Allocate one RX netmem buffer from the ring's page pool.
 *
 * When the system page is larger than BNGE_RX_PAGE_SIZE, a
 * BNGE_RX_PAGE_SIZE fragment is carved out of a pooled page and
 * @offset receives the fragment's offset; otherwise a whole netmem is
 * taken and @offset is 0.  On success @mapping is set to the buffer's
 * DMA address (pool base address plus offset).  Returns the netmem
 * reference, or 0 on allocation failure.
 */
static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
					 dma_addr_t *mapping,
					 struct bnge_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	netmem_ref netmem;

	if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
						     BNGE_RX_PAGE_SIZE, gfp);
	} else {
		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
		*offset = 0;
	}
	if (!netmem)
		return 0;

	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
}
1075 
1076 u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
1077 			 struct bnge_rx_ring_info *rxr,
1078 			 gfp_t gfp)
1079 {
1080 	unsigned int offset;
1081 	struct page *page;
1082 
1083 	page = page_pool_alloc_frag(rxr->head_pool, &offset,
1084 				    bn->rx_buf_size, gfp);
1085 	if (!page)
1086 		return NULL;
1087 
1088 	*mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
1089 	return page_address(page) + offset;
1090 }
1091 
1092 int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
1093 		       u16 prod, gfp_t gfp)
1094 {
1095 	struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
1096 	struct rx_bd *rxbd;
1097 	dma_addr_t mapping;
1098 	u8 *data;
1099 
1100 	rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
1101 	data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
1102 	if (!data)
1103 		return -ENOMEM;
1104 
1105 	rx_buf->data = data;
1106 	rx_buf->data_ptr = data + bn->rx_offset;
1107 	rx_buf->mapping = mapping;
1108 
1109 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1110 
1111 	return 0;
1112 }
1113 
/* Fill one RX ring with buffers, up to bn->rx_ring_size.
 *
 * Partial fills are tolerated (a warning is logged and the ring runs
 * with fewer buffers); only a completely empty ring is treated as a
 * fatal error.  @ring_nr is used for log messages only.
 */
static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr,
				       int ring_nr)
{
	u32 prod = rxr->rx_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_ring_size; i++) {
		rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX(prod);
	}

	/* Abort if not a single buffer can be allocated */
	if (rc && !i) {
		netdev_err(bn->netdev,
			   "RX ring %d: allocated %d/%d buffers, abort\n",
			   ring_nr, i, bn->rx_ring_size);
		return rc;
	}

	rxr->rx_prod = prod;

	if (i < bn->rx_ring_size)
		netdev_warn(bn->netdev,
			    "RX ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_ring_size);
	return 0;
}
1144 
1145 u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
1146 {
1147 	u16 next, max = rxr->rx_agg_bmap_size;
1148 
1149 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1150 	if (next >= max)
1151 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1152 	return next;
1153 }
1154 
/* Allocate one aggregation buffer and post it at producer index @prod.
 *
 * The software slot is chosen via the agg bitmap (the hardware returns
 * completions out of order, so rx_bd_opaque records which software slot
 * backs this descriptor).  Returns 0 or -ENOMEM.
 */
int bnge_alloc_rx_netmem(struct bnge_net *bn,
			 struct bnge_rx_ring_info *rxr,
			 u16 prod, gfp_t gfp)
{
	struct bnge_sw_rx_agg_bd *rx_agg_buf;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;
	struct rx_bd *rxbd;
	dma_addr_t mapping;
	netmem_ref netmem;

	rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
	netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
	if (!netmem)
		return -ENOMEM;

	/* sw_prod may still be in use (out-of-order completions); skip
	 * to the next free slot in that case.
	 */
	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->netmem = netmem;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
1185 
/* Fill one aggregation ring with netmem buffers.
 *
 * A partial fill is acceptable as long as at least MAX_SKB_FRAGS
 * buffers were posted (enough for one fully-fragmented packet);
 * otherwise everything is rolled back and -ENOMEM returned.
 * @ring_nr is used for log messages only.
 */
static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr,
					int ring_nr)
{
	u32 prod = rxr->rx_agg_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_agg_ring_size; i++) {
		rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX_AGG(prod);
	}

	if (rc && i < MAX_SKB_FRAGS) {
		netdev_err(bn->netdev,
			   "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
			   ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
		goto err_free_one_agg_ring_bufs;
	}

	rxr->rx_agg_prod = prod;

	if (i < bn->rx_agg_ring_size)
		netdev_warn(bn->netdev,
			    "Agg ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_agg_ring_size);
	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
	return -ENOMEM;
}
1219 
/* Allocate a backing data buffer for every TPA (aggregation) slot of
 * @rxr.  On any allocation failure all buffers allocated so far are
 * released and -ENOMEM is returned.
 */
static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr)
{
	dma_addr_t mapping;
	u8 *data;
	int i;

	for (i = 0; i < bn->max_tpa; i++) {
		data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
					    GFP_KERNEL);
		if (!data)
			goto err_free_tpa_info_data;

		rxr->rx_tpa[i].data = data;
		/* data_ptr skips the headroom reserved at rx_offset. */
		rxr->rx_tpa[i].data_ptr = data + bn->rx_offset;
		rxr->rx_tpa[i].mapping = mapping;
	}
	return 0;

err_free_tpa_info_data:
	bnge_free_one_tpa_info_data(bn, rxr);
	return -ENOMEM;
}
1243 
/* Populate the RX ring, its aggregation ring (when the device needs
 * one) and its TPA buffers for ring pair @ring_nr.  On failure the
 * goto chain unwinds in reverse order of allocation.
 */
static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
{
	struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
	int rc;

	rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
	if (rc)
		return rc;

	if (bnge_is_agg_reqd(bn->bd)) {
		rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
		if (rc)
			goto err_free_one_rx_ring_bufs;
	}

	/* rx_tpa is only allocated when TPA is enabled for this ring. */
	if (rxr->rx_tpa) {
		rc = bnge_alloc_one_tpa_info_data(bn, rxr);
		if (rc)
			goto err_free_one_agg_ring_bufs;
	}

	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
err_free_one_rx_ring_bufs:
	bnge_free_one_rx_ring_bufs(bn, rxr);
	return rc;
}
1273 
1274 static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
1275 {
1276 	struct rx_bd **rx_desc_ring;
1277 	u32 prod;
1278 	int i;
1279 
1280 	rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
1281 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
1282 		struct rx_bd *rxbd = rx_desc_ring[i];
1283 		int j;
1284 
1285 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1286 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1287 			rxbd->rx_bd_opaque = prod;
1288 		}
1289 	}
1290 }
1291 
1292 static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
1293 				       struct bnge_rx_ring_info *rxr)
1294 {
1295 	struct bnge_ring_struct *ring;
1296 	u32 type;
1297 
1298 	type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1299 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1300 
1301 	if (NET_IP_ALIGN == 2)
1302 		type |= RX_BD_FLAGS_SOP;
1303 
1304 	ring = &rxr->rx_ring_struct;
1305 	bnge_init_rxbd_pages(ring, type);
1306 	ring->fw_ring_id = INVALID_HW_RING_ID;
1307 }
1308 
1309 static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
1310 					struct bnge_rx_ring_info *rxr)
1311 {
1312 	struct bnge_ring_struct *ring;
1313 	u32 type;
1314 
1315 	ring = &rxr->rx_agg_ring_struct;
1316 	ring->fw_ring_id = INVALID_HW_RING_ID;
1317 	if (bnge_is_agg_reqd(bn->bd)) {
1318 		type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
1319 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1320 
1321 		bnge_init_rxbd_pages(ring, type);
1322 	}
1323 }
1324 
1325 static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
1326 {
1327 	struct bnge_rx_ring_info *rxr;
1328 
1329 	rxr = &bn->rx_ring[ring_nr];
1330 	bnge_init_one_rx_ring_rxbd(bn, rxr);
1331 
1332 	netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
1333 			     &rxr->bnapi->napi);
1334 
1335 	bnge_init_one_agg_ring_rxbd(bn, rxr);
1336 }
1337 
1338 static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
1339 {
1340 	int i, rc;
1341 
1342 	for (i = 0; i < bn->bd->rx_nr_rings; i++) {
1343 		rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
1344 		if (rc)
1345 			goto err_free_rx_ring_pair_bufs;
1346 	}
1347 	return 0;
1348 
1349 err_free_rx_ring_pair_bufs:
1350 	bnge_free_rx_ring_pair_bufs(bn);
1351 	return rc;
1352 }
1353 
/* Set the RX buffer layout offsets and initialize the descriptors of
 * every RX/agg ring pair.  Packet data starts NET_SKB_PAD + NET_IP_ALIGN
 * into the buffer (headroom for the stack plus IP header alignment),
 * while DMA is offset by NET_SKB_PAD only.
 */
static void bnge_init_rx_rings(struct bnge_net *bn)
{
	int i;

#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
	bn->rx_offset = BNGE_RX_OFFSET;
	bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;

	for (i = 0; i < bn->bd->rx_nr_rings; i++)
		bnge_init_one_rx_ring_pair(bn, i);
}
1366 
1367 static void bnge_init_tx_rings(struct bnge_net *bn)
1368 {
1369 	int i;
1370 
1371 	bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
1372 
1373 	for (i = 0; i < bn->bd->tx_nr_rings; i++) {
1374 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
1375 		struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1376 
1377 		ring->fw_ring_id = INVALID_HW_RING_ID;
1378 
1379 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
1380 				     &txr->bnapi->napi);
1381 	}
1382 }
1383 
/* Initialize all VNIC bookkeeping: invalidate firmware ids and seed the
 * RSS hash keys.  The default VNIC gets a random key once (then reuses
 * it across reinits); all other VNICs copy the default VNIC's key.
 */
static void bnge_init_vnics(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	int i;

	for (i = 0; i < bn->nr_vnics; i++) {
		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->vnic_id = i;
		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		if (bn->vnic_info[i].rss_hash_key) {
			if (i == BNGE_VNIC_DEFAULT) {
				u8 *key = (void *)vnic->rss_hash_key;
				int k;

				/* Generate a key only on first use; a key
				 * set by the user arrives via the
				 * rss_hash_key_updated flag instead.
				 */
				if (!bn->rss_hash_key_valid &&
				    !bn->rss_hash_key_updated) {
					get_random_bytes(bn->rss_hash_key,
							 HW_HASH_KEY_SIZE);
					bn->rss_hash_key_updated = true;
				}

				memcpy(vnic->rss_hash_key, bn->rss_hash_key,
				       HW_HASH_KEY_SIZE);

				/* Key unchanged: the cached toeplitz
				 * prefix below is still valid, skip it.
				 */
				if (!bn->rss_hash_key_updated)
					continue;

				bn->rss_hash_key_updated = false;
				bn->rss_hash_key_valid = true;

				/* Cache the first 8 key bytes as a
				 * big-endian u64 prefix.
				 */
				bn->toeplitz_prefix = 0;
				for (k = 0; k < 8; k++) {
					bn->toeplitz_prefix <<= 8;
					bn->toeplitz_prefix |= key[k];
				}
			} else {
				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
				       HW_HASH_KEY_SIZE);
			}
		}
	}
}
1431 
/* Derive the doorbell's ring-index mask and epoch fields from the size
 * of the ring it services.  No default case: callers only pass the
 * ring types listed below.
 */
static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
			     u32 ring_type)
{
	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		db->db_ring_mask = bn->tx_ring_mask;
		break;
	case HWRM_RING_ALLOC_RX:
		db->db_ring_mask = bn->rx_ring_mask;
		break;
	case HWRM_RING_ALLOC_AGG:
		db->db_ring_mask = bn->rx_agg_ring_mask;
		break;
	case HWRM_RING_ALLOC_CMPL:
	case HWRM_RING_ALLOC_NQ:
		db->db_ring_mask = bn->cp_ring_mask;
		break;
	}
	/* The epoch bit toggles each time the producer index wraps. */
	db->db_epoch_mask = db->db_ring_mask + 1;
	db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
}
1453 
/* Prepare a doorbell descriptor for a newly allocated ring: select the
 * doorbell type for the ring type, embed the firmware ring id (@xid),
 * point at the doorbell BAR region and compute the ring masks.
 */
static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
			u32 ring_type, u32 map_idx, u32 xid)
{
	struct bnge_dev *bd = bn->bd;

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
		break;
	case HWRM_RING_ALLOC_RX:
	case HWRM_RING_ALLOC_AGG:
		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
		break;
	case HWRM_RING_ALLOC_CMPL:
		db->db_key64 = DBR_PATH_L2;
		break;
	case HWRM_RING_ALLOC_NQ:
		db->db_key64 = DBR_PATH_L2;
		break;
	}
	db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;

	db->doorbell = bd->bar1 + bd->db_offset;
	bnge_set_db_mask(bn, db, ring_type);
}
1479 
/* Allocate one completion ring in firmware, set up its doorbell and
 * write the initial consumer index.  The NQ handle is stored in the
 * ring so completions can be routed back to @cpr.
 */
static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
				   struct bnge_cp_ring_info *cpr)
{
	const u32 type = HWRM_RING_ALLOC_CMPL;
	struct bnge_napi *bnapi = cpr->bnapi;
	struct bnge_ring_struct *ring;
	u32 map_idx = bnapi->index;
	int rc;

	ring = &cpr->ring_struct;
	ring->handle = BNGE_SET_NQ_HDL(cpr);
	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
	bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);

	return 0;
}
1500 
1501 static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
1502 				   struct bnge_tx_ring_info *txr, u32 tx_idx)
1503 {
1504 	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1505 	const u32 type = HWRM_RING_ALLOC_TX;
1506 	int rc;
1507 
1508 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
1509 	if (rc)
1510 		return rc;
1511 
1512 	bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
1513 
1514 	return 0;
1515 }
1516 
/* Allocate one aggregation ring in firmware.  Agg rings are mapped
 * after all RX rings, hence the rx_nr_rings offset.  Once the doorbell
 * is set up, both the agg and the RX producer indexes are posted (the
 * RX doorbell was deliberately held back until agg buffers exist).
 */
static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
	u32 type = HWRM_RING_ALLOC_AGG;
	struct bnge_dev *bd = bn->bd;
	u32 grp_idx = ring->grp_idx;
	u32 map_idx;
	int rc;

	map_idx = grp_idx + bd->rx_nr_rings;
	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
		    ring->fw_ring_id);
	bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
	bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
	bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;

	return 0;
}
1540 
1541 static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
1542 				   struct bnge_rx_ring_info *rxr)
1543 {
1544 	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
1545 	struct bnge_napi *bnapi = rxr->bnapi;
1546 	u32 type = HWRM_RING_ALLOC_RX;
1547 	u32 map_idx = bnapi->index;
1548 	int rc;
1549 
1550 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1551 	if (rc)
1552 		return rc;
1553 
1554 	bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
1555 	bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
1556 
1557 	return 0;
1558 }
1559 
/* Allocate all hardware rings in firmware, in dependency order:
 * notification queues first, then per-TX-ring completion + TX rings,
 * then RX (+ their completion) rings, and finally aggregation rings.
 */
static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	bool agg_rings;
	int i, rc = 0;

	agg_rings = !!(bnge_is_agg_reqd(bd));
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
		struct bnge_ring_struct *ring = &nqr->ring_struct;
		u32 type = HWRM_RING_ALLOC_NQ;
		u32 map_idx = ring->map_idx;
		unsigned int vector;

		/* Keep the vector masked while the NQ is created and its
		 * doorbell initialized; presumably so no interrupt fires
		 * before the doorbell state is consistent — NOTE(review):
		 * confirm against the firmware interface requirements.
		 */
		vector = bd->irq_tbl[map_idx].vector;
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc) {
			enable_irq(vector);
			goto err_out;
		}
		bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
		bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
		enable_irq(vector);
		bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;

		/* The first NQ also receives firmware async events. */
		if (!i) {
			rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
			if (rc)
				netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
		}
	}

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];

		/* Each TX ring needs its completion ring allocated first. */
		rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
		if (rc)
			goto err_out;
		rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
		if (rc)
			goto err_out;
	}

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		struct bnge_napi *bnapi;
		u32 map_idx, type;

		rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
		if (rc)
			goto err_out;
		/* If we have agg rings, post agg buffers first. */
		if (!agg_rings)
			bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);

		cpr = rxr->rx_cpr;
		bnapi = rxr->bnapi;
		type = HWRM_RING_ALLOC_CMPL;
		map_idx = bnapi->index;

		ring = &cpr->ring_struct;
		ring->handle = BNGE_SET_NQ_HDL(cpr);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnge_set_db(bn, &cpr->cp_db, type, map_idx,
			    ring->fw_ring_id);
		bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
	}

	/* Agg ring alloc also posts the deferred RX producer indexes. */
	if (agg_rings) {
		for (i = 0; i < bd->rx_nr_rings; i++) {
			rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
			if (rc)
				goto err_out;
		}
	}
err_out:
	return rc;
}
1644 
1645 void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
1646 {
1647 	__le16 *ring_tbl = vnic->rss_table;
1648 	struct bnge_rx_ring_info *rxr;
1649 	struct bnge_dev *bd = bn->bd;
1650 	u16 tbl_size, i;
1651 
1652 	tbl_size = bnge_get_rxfh_indir_size(bd);
1653 
1654 	for (i = 0; i < tbl_size; i++) {
1655 		u16 ring_id, j;
1656 
1657 		j = bd->rss_indir_tbl[i];
1658 		rxr = &bn->rx_ring[j];
1659 
1660 		ring_id = rxr->rx_ring_struct.fw_ring_id;
1661 		*ring_tbl++ = cpu_to_le16(ring_id);
1662 		ring_id = bnge_cp_ring_for_rx(rxr);
1663 		*ring_tbl++ = cpu_to_le16(ring_id);
1664 	}
1665 }
1666 
/* Program RSS for @vnic: first the hash key/type and indirection table,
 * then the overall VNIC configuration.  Errors are logged and returned;
 * the vnic_cfg error path intentionally falls through to the return.
 */
static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
				  struct bnge_vnic_info *vnic)
{
	int rc;

	rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
	if (rc) {
		netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic->vnic_id, rc);
		return rc;
	}
	rc = bnge_hwrm_vnic_cfg(bn, vnic);
	if (rc)
		netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
			   vnic->vnic_id, rc);
	return rc;
}
1684 
/* Fully configure @vnic: allocate its RSS contexts, program RSS, and
 * (when aggregation is in use) enable header-data split.  An HDS
 * failure is logged and its rc returned.
 */
static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	struct bnge_dev *bd = bn->bd;
	int rc, i, nr_ctxs;

	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
				   vnic->vnic_id, i, rc);
			/* NOTE(review): the firmware rc is logged but
			 * flattened to -ENOMEM here — confirm intentional.
			 */
			return -ENOMEM;
		}
		bn->rsscos_nr_ctxs++;
	}

	rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
	if (rc)
		return rc;

	if (bnge_is_agg_reqd(bd)) {
		rc = bnge_hwrm_vnic_set_hds(bn, vnic);
		if (rc)
			netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
				   vnic->vnic_id, rc);
	}
	return rc;
}
1713 
/* Drop one reference on @fltr.  On the last reference the filter is
 * unhashed and freed after an RCU grace period, so concurrent RCU
 * readers in __bnge_lookup_l2_filter() stay safe.
 */
static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
{
	if (!refcount_dec_and_test(&fltr->refcnt))
		return;
	hlist_del_rcu(&fltr->base.hash);
	kfree_rcu(fltr, base.rcu);
}
1721 
/* Fill in a freshly allocated L2 filter and publish it in hash bucket
 * @idx with an initial reference held by the caller.
 *
 * NOTE(review): the refcount is set to 1 after the filter becomes
 * visible via hlist_add_head_rcu(); this assumes writers and
 * ref-taking lookups are serialized by a higher-level lock — confirm.
 */
static void bnge_init_l2_filter(struct bnge_net *bn,
				struct bnge_l2_filter *fltr,
				struct bnge_l2_key *key, u32 idx)
{
	struct hlist_head *head;

	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
	fltr->l2_key.vlan = key->vlan;
	fltr->base.type = BNGE_FLTR_TYPE_L2;

	head = &bn->l2_fltr_hash_tbl[idx];
	hlist_add_head_rcu(&fltr->base.hash, head);
	refcount_set(&fltr->refcnt, 1);
}
1736 
/* Look up an L2 filter by (MAC, VLAN) in hash bucket @idx.
 *
 * Caller must hold the RCU read lock.  Returns the filter without
 * taking a reference, or NULL if no match exists.
 */
static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
						      struct bnge_l2_key *key,
						      u32 idx)
{
	struct bnge_l2_filter *fltr;
	struct hlist_head *head;

	head = &bn->l2_fltr_hash_tbl[idx];
	hlist_for_each_entry_rcu(fltr, head, base.hash) {
		struct bnge_l2_key *l2_key = &fltr->l2_key;

		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
		    l2_key->vlan == key->vlan)
			return fltr;
	}
	return NULL;
}
1754 
/* RCU-safe L2 filter lookup.  On a hit, a reference is taken inside
 * the RCU critical section; the caller owns that reference and must
 * release it with bnge_del_l2_filter().  Returns NULL on a miss.
 */
static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
						    struct bnge_l2_key *key,
						    u32 idx)
{
	struct bnge_l2_filter *fltr;

	rcu_read_lock();
	fltr = __bnge_lookup_l2_filter(bn, key, idx);
	if (fltr)
		refcount_inc(&fltr->refcnt);
	rcu_read_unlock();
	return fltr;
}
1768 
/* Get an L2 filter for @key: return an existing matching filter with an
 * extra reference taken, or allocate, initialize and hash a new one.
 * Returns ERR_PTR(-ENOMEM) if allocation fails; the caller owns one
 * reference in all success cases.
 */
static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
						   struct bnge_l2_key *key,
						   gfp_t gfp)
{
	struct bnge_l2_filter *fltr;
	u32 idx;

	/* Hash bucket selection matches the lookup side. */
	idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
	      BNGE_L2_FLTR_HASH_MASK;
	fltr = bnge_lookup_l2_filter(bn, key, idx);
	if (fltr)
		return fltr;

	fltr = kzalloc(sizeof(*fltr), gfp);
	if (!fltr)
		return ERR_PTR(-ENOMEM);

	bnge_init_l2_filter(bn, fltr, key, idx);
	return fltr;
}
1789 
1790 static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
1791 				     const u8 *mac_addr)
1792 {
1793 	struct bnge_l2_filter *fltr;
1794 	struct bnge_l2_key key;
1795 	int rc;
1796 
1797 	ether_addr_copy(key.dst_mac_addr, mac_addr);
1798 	key.vlan = 0;
1799 	fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
1800 	if (IS_ERR(fltr))
1801 		return PTR_ERR(fltr);
1802 
1803 	fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
1804 	rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
1805 	if (rc)
1806 		goto err_del_l2_filter;
1807 	bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
1808 	return rc;
1809 
1810 err_del_l2_filter:
1811 	bnge_del_l2_filter(bn, fltr);
1812 	return rc;
1813 }
1814 
/* Sync the netdev multicast list into the default VNIC's cached list.
 *
 * If the list overflows BNGE_MAX_MC_ADDRS, ALL_MCAST is requested via
 * @rx_mask instead and false is returned (no per-address update
 * needed).  Otherwise returns true when the cached list or its length
 * changed, so the caller knows to reprogram the RX mask.
 */
static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct netdev_hw_addr *ha;
	int mc_count = 0, off = 0;
	bool update = false;
	u8 *haddr;

	netdev_for_each_mc_addr(ha, dev) {
		if (mc_count >= BNGE_MAX_MC_ADDRS) {
			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			vnic->mc_list_count = 0;
			return false;
		}
		haddr = ha->addr;
		/* Only copy (and flag an update) when the entry differs. */
		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
			update = true;
		}
		off += ETH_ALEN;
		mc_count++;
	}
	if (mc_count)
		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

	if (mc_count != vnic->mc_list_count) {
		vnic->mc_list_count = mc_count;
		update = true;
	}
	return update;
}
1847 
/* Return true when the netdev unicast list differs from the VNIC's
 * cached copy.  Filter slot 0 holds the device's own MAC (set in
 * bnge_init_chip()) and is not part of the netdev UC list, hence the
 * uc_filter_count - 1 comparison.
 */
static bool bnge_uc_list_updated(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct netdev_hw_addr *ha;
	int off = 0;

	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
		return true;

	netdev_for_each_uc_addr(ha, dev) {
		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
			return true;

		off += ETH_ALEN;
	}
	return false;
}
1866 
/* Whether promiscuous mode may be enabled.  Currently unconditionally
 * true; kept as a hook for future policy (e.g. VF restrictions).
 */
static bool bnge_promisc_ok(struct bnge_net *bn)
{
	return true;
}
1871 
/* Reprogram the default VNIC's unicast filters and RX mask from the
 * current netdev state.
 *
 * The netdev address list is sampled under the addr lock; filters are
 * (re)programmed in firmware outside it.  If programming MC filters
 * fails, the function falls back to ALL_MCAST and retries the RX mask.
 */
static int bnge_cfg_def_vnic(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;
	struct netdev_hw_addr *ha;
	int i, off = 0, rc;
	bool uc_update;

	netif_addr_lock_bh(dev);
	uc_update = bnge_uc_list_updated(bn);
	netif_addr_unlock_bh(dev);

	if (!uc_update)
		goto skip_uc;

	/* Drop every extra UC filter; slot 0 (own MAC) is kept. */
	for (i = 1; i < vnic->uc_filter_count; i++) {
		struct bnge_l2_filter *fltr = vnic->l2_filters[i];

		bnge_hwrm_l2_filter_free(bd, fltr);
		bnge_del_l2_filter(bn, fltr);
	}

	vnic->uc_filter_count = 1;

	/* Re-snapshot the UC list; overflow forces promiscuous mode. */
	netif_addr_lock_bh(dev);
	if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
			off += ETH_ALEN;
			vnic->uc_filter_count++;
		}
	}
	netif_addr_unlock_bh(dev);

	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
		rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
		if (rc) {
			netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
			/* Keep the count consistent with what actually
			 * made it into firmware.
			 */
			vnic->uc_filter_count = i;
			return rc;
		}
	}

skip_uc:
	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
	    !bnge_promisc_ok(bn))
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
		netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
			    rc);
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
		rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	}
	if (rc)
		netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
			   rc);

	return rc;
}
1937 
1938 static void bnge_disable_int(struct bnge_net *bn)
1939 {
1940 	struct bnge_dev *bd = bn->bd;
1941 	int i;
1942 
1943 	if (!bn->bnapi)
1944 		return;
1945 
1946 	for (i = 0; i < bd->nq_nr_rings; i++) {
1947 		struct bnge_napi *bnapi = bn->bnapi[i];
1948 		struct bnge_nq_ring_info *nqr;
1949 		struct bnge_ring_struct *ring;
1950 
1951 		nqr = &bnapi->nq_ring;
1952 		ring = &nqr->ring_struct;
1953 
1954 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
1955 			bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
1956 	}
1957 }
1958 
1959 static void bnge_disable_int_sync(struct bnge_net *bn)
1960 {
1961 	struct bnge_dev *bd = bn->bd;
1962 	int i;
1963 
1964 	bnge_disable_int(bn);
1965 	for (i = 0; i < bd->nq_nr_rings; i++) {
1966 		int map_idx = bnge_cp_num_to_irq_num(bn, i);
1967 
1968 		synchronize_irq(bd->irq_tbl[map_idx].vector);
1969 	}
1970 }
1971 
1972 static void bnge_enable_int(struct bnge_net *bn)
1973 {
1974 	struct bnge_dev *bd = bn->bd;
1975 	int i;
1976 
1977 	for (i = 0; i < bd->nq_nr_rings; i++) {
1978 		struct bnge_napi *bnapi = bn->bnapi[i];
1979 		struct bnge_nq_ring_info *nqr;
1980 
1981 		nqr = &bnapi->nq_ring;
1982 		bnge_db_nq_arm(bn, &nqr->nq_db, nqr->nq_raw_cons);
1983 	}
1984 }
1985 
/* Disable NAPI on every ring.  The test_and_set_bit() makes this
 * idempotent: a second call while already disabled returns without
 * touching the (already disabled) NAPI instances.
 */
static void bnge_disable_napi(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (test_and_set_bit(BNGE_STATE_NAPI_DISABLED, &bn->state))
		return;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];

		napi_disable_locked(&bnapi->napi);
	}
}
2000 
/* Re-enable NAPI on every ring, clearing per-ring reset/fault state
 * first so polling restarts cleanly.
 */
static void bnge_enable_napi(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	clear_bit(BNGE_STATE_NAPI_DISABLED, &bn->state);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];

		bnapi->in_reset = false;
		bnapi->tx_fault = 0;

		napi_enable_locked(&bnapi->napi);
	}
}
2016 
2017 static void bnge_hwrm_vnic_free(struct bnge_net *bn)
2018 {
2019 	int i;
2020 
2021 	for (i = 0; i < bn->nr_vnics; i++)
2022 		bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
2023 }
2024 
2025 static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
2026 {
2027 	int i, j;
2028 
2029 	for (i = 0; i < bn->nr_vnics; i++) {
2030 		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
2031 
2032 		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
2033 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
2034 				bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
2035 		}
2036 	}
2037 	bn->rsscos_nr_ctxs = 0;
2038 }
2039 
/* Remove all unicast L2 filters of the default VNIC: free each filter
 * in firmware, then drop the host-side reference.
 */
static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	int i;

	for (i = 0; i < vnic->uc_filter_count; i++) {
		struct bnge_l2_filter *fltr = vnic->l2_filters[i];

		bnge_hwrm_l2_filter_free(bn->bd, fltr);
		bnge_del_l2_filter(bn, fltr);
	}

	vnic->uc_filter_count = 0;
}
2054 
/* Tear down all VNIC state in firmware: filters first, then the VNICs
 * themselves, then their RSS contexts.
 */
static void bnge_clear_vnic(struct bnge_net *bn)
{
	bnge_hwrm_clear_vnic_filter(bn);
	bnge_hwrm_vnic_free(bn);
	bnge_hwrm_vnic_ctx_free(bn);
}
2061 
2062 static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
2063 				   struct bnge_rx_ring_info *rxr,
2064 				   bool close_path)
2065 {
2066 	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
2067 	u32 grp_idx = rxr->bnapi->index;
2068 	u32 cmpl_ring_id;
2069 
2070 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
2071 		return;
2072 
2073 	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
2074 	hwrm_ring_free_send_msg(bn, ring,
2075 				RING_FREE_REQ_RING_TYPE_RX,
2076 				close_path ? cmpl_ring_id :
2077 				INVALID_HW_RING_ID);
2078 	ring->fw_ring_id = INVALID_HW_RING_ID;
2079 	bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
2080 }
2081 
2082 static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
2083 				       struct bnge_rx_ring_info *rxr,
2084 				       bool close_path)
2085 {
2086 	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
2087 	u32 grp_idx = rxr->bnapi->index;
2088 	u32 cmpl_ring_id;
2089 
2090 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
2091 		return;
2092 
2093 	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
2094 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
2095 				close_path ? cmpl_ring_id :
2096 				INVALID_HW_RING_ID);
2097 	ring->fw_ring_id = INVALID_HW_RING_ID;
2098 	bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
2099 }
2100 
2101 static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
2102 				   struct bnge_tx_ring_info *txr,
2103 				   bool close_path)
2104 {
2105 	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
2106 	u32 cmpl_ring_id;
2107 
2108 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
2109 		return;
2110 
2111 	cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
2112 		       INVALID_HW_RING_ID;
2113 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
2114 				cmpl_ring_id);
2115 	ring->fw_ring_id = INVALID_HW_RING_ID;
2116 }
2117 
2118 static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
2119 				   struct bnge_cp_ring_info *cpr)
2120 {
2121 	struct bnge_ring_struct *ring;
2122 
2123 	ring = &cpr->ring_struct;
2124 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
2125 		return;
2126 
2127 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
2128 				INVALID_HW_RING_ID);
2129 	ring->fw_ring_id = INVALID_HW_RING_ID;
2130 }
2131 
/* Free all firmware rings in reverse dependency order: TX, RX/agg,
 * then (after quiescing IRQs) completion rings and notification
 * queues.  @close_path selects whether firmware flushes through the
 * associated completion rings.
 */
static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (!bn->bnapi)
		return;

	for (i = 0; i < bd->tx_nr_rings; i++)
		bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);

	for (i = 0; i < bd->rx_nr_rings; i++) {
		bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
		bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
	}

	/* The completion rings are about to be freed.  After that the
	 * IRQ doorbell will not work anymore.  So we need to disable
	 * IRQ here.
	 */
	bnge_disable_int_sync(bn);

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		/* Free the NQ's child completion rings before the NQ. */
		nqr = &bnapi->nq_ring;
		for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
			bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);

		ring = &nqr->ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bn, ring,
						RING_FREE_REQ_RING_TYPE_NQ,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}
2174 
2175 static void bnge_setup_msix(struct bnge_net *bn)
2176 {
2177 	struct net_device *dev = bn->netdev;
2178 	struct bnge_dev *bd = bn->bd;
2179 	int len, i;
2180 
2181 	len = sizeof(bd->irq_tbl[0].name);
2182 	for (i = 0; i < bd->nq_nr_rings; i++) {
2183 		int map_idx = bnge_cp_num_to_irq_num(bn, i);
2184 		char *attr;
2185 
2186 		if (bd->flags & BNGE_EN_SHARED_CHNL)
2187 			attr = "TxRx";
2188 		else if (i < bd->rx_nr_rings)
2189 			attr = "rx";
2190 		else
2191 			attr = "tx";
2192 
2193 		snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
2194 			 attr, i);
2195 		bd->irq_tbl[map_idx].handler = bnge_msix;
2196 	}
2197 }
2198 
/* Name the MSI-X vectors and publish the real TX/RX queue counts to
 * the networking core.  Returns the result of the queue-count update.
 */
static int bnge_setup_interrupts(struct bnge_net *bn)
{
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;

	bnge_setup_msix(bn);

	return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
}
2208 
/* Release all firmware-side resources: VNIC state, rings, then the
 * statistics contexts they referenced.
 */
static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
{
	bnge_clear_vnic(bn);
	bnge_hwrm_ring_free(bn, close_path);
	bnge_hwrm_stat_ctx_free(bn);
}
2215 
/* Release every requested IRQ: clear its CPU affinity hint, free the
 * cpumask, and hand the vector back.  Safe against partially
 * requested tables (bnge_request_irq() error path) because only
 * entries with ->requested set are freed.
 */
static void bnge_free_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_irq *irq;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);

		irq = &bd->irq_tbl[map_idx];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			free_irq(irq->vector, bn->bnapi[i]);
		}

		irq->requested = 0;
	}
}
2238 
/* Request one IRQ per notification queue, bind it to its NAPI instance
 * and spread the vectors across CPUs local to the device's NUMA node.
 * On any failure all vectors requested so far are released.
 */
static int bnge_request_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, rc;

	rc = bnge_setup_interrupts(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
		return rc;
	}
	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);
		struct bnge_irq *irq = &bd->irq_tbl[map_idx];

		/* The per-NQ bnapi is the dev_id passed to the handler. */
		rc = request_irq(irq->vector, irq->handler, 0, irq->name,
				 bn->bnapi[i]);
		if (rc)
			goto err_free_irq;

		netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
		irq->requested = 1;

		/* Affinity is best effort: allocation failure just means
		 * no hint is set for this vector.
		 */
		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bd->pdev->dev);

			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bn->netdev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				goto err_free_irq;
			}
		}
	}
	return 0;

err_free_irq:
	bnge_free_irq(bn);
	return rc;
}
2282 
/* Enable or disable TPA (aggregation offload) on every VNIC.  When
 * disabling while firmware is unreachable, the call is skipped —
 * there is nothing to tear down in a dead firmware.
 */
static int bnge_set_tpa(struct bnge_net *bn, bool set_tpa)
{
	u32 tpa_flags = 0;
	int rc, i;

	if (set_tpa)
		tpa_flags = bn->priv_flags & BNGE_NET_EN_TPA;
	else if (BNGE_NO_FW_ACCESS(bn->bd))
		return 0;
	for (i = 0; i < bn->nr_vnics; i++) {
		rc = bnge_hwrm_vnic_set_tpa(bn->bd, &bn->vnic_info[i],
					    tpa_flags);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}
2303 
/* Bring up the FW-side datapath: allocate stat contexts, HW rings and the
 * default VNIC, then configure RSS, TPA, the unicast MAC filter and the RX
 * mask.  The call order is mandated by the FW interface.  On any failure
 * every FW resource allocated so far is released via
 * bnge_hwrm_resource_free().
 *
 * Returns 0 on success or a negative errno / FW error code.
 */
static int bnge_init_chip(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct bnge_dev *bd = bn->bd;
	int rc;

/* Default statistics DMA interval, in ticks */
#define BNGE_DEF_STATS_COAL_TICKS	 1000000
	bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;

	rc = bnge_hwrm_stat_ctx_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_ring_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
	if (rc) {
		netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_setup_vnic(bn, vnic);
	if (rc)
		goto err_out;

	if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bnge_hwrm_update_rss_hash_cfg(bn);

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_set_tpa(bn, true);
		if (rc)
			goto err_out;
	}

	/* Filter for default vnic 0 */
	rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
	if (rc) {
		netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	/* Build the RX mask from the current netdev flags */
	vnic->rx_mask = 0;

	if (bn->netdev->flags & IFF_BROADCAST)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if (bn->netdev->flags & IFF_PROMISC)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bn->netdev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else if (bn->netdev->flags & IFF_MULTICAST) {
		u32 mask = 0;

		bnge_mc_list_updated(bn, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnge_cfg_def_vnic(bn);
	if (rc)
		goto err_out;
	return 0;

err_out:
	/* close_path=0: partial bring-up teardown */
	bnge_hwrm_resource_free(bn, 0);
	return rc;
}
2379 
2380 static void bnge_init_napi(struct bnge_net *bn)
2381 {
2382 	struct bnge_dev *bd = bn->bd;
2383 	struct bnge_napi *bnapi;
2384 	int i;
2385 
2386 	for (i = 0; i < bd->nq_nr_rings; i++) {
2387 		bnapi = bn->bnapi[i];
2388 		netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
2389 					     bnge_napi_poll, bnapi->index);
2390 	}
2391 }
2392 
2393 static void bnge_del_napi(struct bnge_net *bn)
2394 {
2395 	struct bnge_dev *bd = bn->bd;
2396 	int i;
2397 
2398 	for (i = 0; i < bd->rx_nr_rings; i++)
2399 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
2400 	for (i = 0; i < bd->tx_nr_rings; i++)
2401 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
2402 
2403 	for (i = 0; i < bd->nq_nr_rings; i++) {
2404 		struct bnge_napi *bnapi = bn->bnapi[i];
2405 
2406 		__netif_napi_del_locked(&bnapi->napi);
2407 	}
2408 
2409 	/* Wait for RCU grace period after removing NAPI instances */
2410 	synchronize_net();
2411 }
2412 
/* Initialize the software ring state, fill the RX rings with buffers and
 * bring up the chip through FW.  Returns 0 on success or a negative errno.
 *
 * Fix: the original err_free_ring_grps path returned without releasing the
 * RX ring pair buffers, leaking them when bnge_init_chip() failed (the open
 * error path does not call bnge_free_all_rings_bufs(); only the close path
 * does).  The cleanup labels now cascade in reverse order of acquisition.
 */
static int bnge_init_nic(struct bnge_net *bn)
{
	int rc;

	bnge_init_nq_tree(bn);

	bnge_init_rx_rings(bn);
	rc = bnge_alloc_rx_ring_pair_bufs(bn);
	if (rc)
		return rc;

	bnge_init_tx_rings(bn);

	rc = bnge_init_ring_grps(bn);
	if (rc)
		goto err_free_rx_ring_pair_bufs;

	bnge_init_vnics(bn);

	rc = bnge_init_chip(bn);
	if (rc)
		goto err_free_ring_grps;

	return 0;

err_free_ring_grps:
	bnge_free_ring_grps(bn);
err_free_rx_ring_pair_bufs:
	bnge_free_rx_ring_pair_bufs(bn);
	return rc;
}
2445 
2446 static void bnge_tx_disable(struct bnge_net *bn)
2447 {
2448 	struct bnge_tx_ring_info *txr;
2449 	int i;
2450 
2451 	if (bn->tx_ring) {
2452 		for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2453 			txr = &bn->tx_ring[i];
2454 			WRITE_ONCE(txr->dev_state, BNGE_DEV_STATE_CLOSING);
2455 		}
2456 	}
2457 	/* Make sure napi polls see @dev_state change */
2458 	synchronize_net();
2459 
2460 	if (!bn->netdev)
2461 		return;
2462 	/* Drop carrier first to prevent TX timeout */
2463 	netif_carrier_off(bn->netdev);
2464 	/* Stop all TX queues */
2465 	netif_tx_disable(bn->netdev);
2466 }
2467 
2468 static void bnge_tx_enable(struct bnge_net *bn)
2469 {
2470 	struct bnge_tx_ring_info *txr;
2471 	int i;
2472 
2473 	for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2474 		txr = &bn->tx_ring[i];
2475 		WRITE_ONCE(txr->dev_state, 0);
2476 	}
2477 	/* Make sure napi polls see @dev_state change */
2478 	synchronize_net();
2479 	netif_tx_wake_all_queues(bn->netdev);
2480 }
2481 
/* Core of ndo_open: reserve rings with FW, allocate all host memory,
 * register NAPI, request IRQs, initialize the NIC, then enable NAPI,
 * interrupts and TX in that order.  On failure everything acquired so far
 * is unwound in reverse.
 *
 * NOTE(review): if bnge_alloc_core() fails, the rings reserved by
 * bnge_reserve_rings() are not explicitly returned here — confirm the
 * reservation is reconciled elsewhere (e.g. on the next reserve call).
 *
 * Returns 0 on success or a negative errno.
 */
static int bnge_open_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int rc;

	netif_carrier_off(bn->netdev);

	rc = bnge_reserve_rings(bd);
	if (rc) {
		netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
		return rc;
	}

	rc = bnge_alloc_core(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
		return rc;
	}

	bnge_init_napi(bn);
	rc = bnge_request_irq(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
		goto err_del_napi;
	}

	rc = bnge_init_nic(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
		goto err_free_irq;
	}

	bnge_enable_napi(bn);

	/* Publish OPEN before unmasking interrupts / waking TX */
	set_bit(BNGE_STATE_OPEN, &bd->state);

	bnge_enable_int(bn);

	bnge_tx_enable(bn);
	return 0;

err_free_irq:
	bnge_free_irq(bn);
err_del_napi:
	bnge_del_napi(bn);
	bnge_free_core(bn);
	return rc;
}
2530 
/* ndo_open handler: delegate to bnge_open_core() and log any failure. */
static int bnge_open(struct net_device *dev)
{
	int rc = bnge_open_core(netdev_priv(dev));

	if (rc)
		netdev_err(dev, "bnge_open_core err: %d\n", rc);

	return rc;
}
2542 
/* Release all FW-side datapath resources on ifdown.  Always returns 0. */
static int bnge_shutdown_nic(struct bnge_net *bn)
{
	bnge_hwrm_resource_free(bn, 1);
	return 0;
}
2548 
/* Core of ndo_stop: quiesce TX first, clear the OPEN state, tear down FW
 * resources, then NAPI, ring buffers, IRQs and finally all core memory.
 * Ordering mirrors the reverse of bnge_open_core().
 */
static void bnge_close_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;

	bnge_tx_disable(bn);

	clear_bit(BNGE_STATE_OPEN, &bd->state);
	bnge_shutdown_nic(bn);
	bnge_disable_napi(bn);
	bnge_free_all_rings_bufs(bn);
	bnge_free_irq(bn);
	bnge_del_napi(bn);

	bnge_free_core(bn);
}
2564 
/* ndo_stop handler: full datapath teardown; cannot fail. */
static int bnge_close(struct net_device *dev)
{
	bnge_close_core(netdev_priv(dev));

	return 0;
}
2573 
/* Netdev callbacks implemented by this driver. */
static const struct net_device_ops bnge_netdev_ops = {
	.ndo_open		= bnge_open,
	.ndo_stop		= bnge_close,
	.ndo_start_xmit		= bnge_start_xmit,
	.ndo_features_check	= bnge_features_check,
};
2580 
/* Copy the PF MAC address (obtained from FW) into the netdev. */
static void bnge_init_mac_addr(struct bnge_dev *bd)
{
	eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
}
2585 
2586 static void bnge_set_tpa_flags(struct bnge_dev *bd)
2587 {
2588 	struct bnge_net *bn = netdev_priv(bd->netdev);
2589 
2590 	bn->priv_flags &= ~BNGE_NET_EN_TPA;
2591 
2592 	if (bd->netdev->features & NETIF_F_LRO)
2593 		bn->priv_flags |= BNGE_NET_EN_LRO;
2594 	else if (bd->netdev->features & NETIF_F_GRO_HW)
2595 		bn->priv_flags |= BNGE_NET_EN_GRO;
2596 }
2597 
2598 static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
2599 {
2600 	int i;
2601 
2602 	for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
2603 		INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
2604 	get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
2605 }
2606 
/* Compute RX/TX/AGG/CP ring geometry (buffer sizes, page counts, masks)
 * from the configured ring sizes, MTU and TPA/jumbo state.  Sizes that
 * exceed hardware limits are clamped with a warning.
 */
void bnge_set_ring_params(struct bnge_dev *bd)
{
	struct bnge_net *bn = netdev_priv(bd->netdev);
	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	/* Full per-buffer footprint including skb headroom and shared info */
	rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ring_size = bn->rx_ring_size;
	bn->rx_agg_ring_size = 0;
	bn->rx_agg_nr_pages = 0;

	/* TPA aggregates up to 64KB; size the agg ring accordingly */
	if (bn->priv_flags & BNGE_NET_EN_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);

	bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
	if (rx_space > PAGE_SIZE) {
		u32 jumbo_factor;

		/* MTU no longer fits one page: use the agg ring for jumbo */
		bn->priv_flags |= BNGE_NET_EN_JUMBO;
		jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	if (agg_factor) {
		if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
			ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
			netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
				    bn->rx_ring_size, ring_size);
			bn->rx_ring_size = ring_size;
		}
		agg_ring_size = ring_size * agg_factor;

		bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
							  RX_DESC_CNT);
		if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bn->rx_agg_ring_size = agg_ring_size;
		bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;

		/* With an agg ring, the main RX buffer only needs to hold
		 * up to the header-data-split threshold / copybreak.
		 */
		rx_size = max3(BNGE_DEFAULT_RX_COPYBREAK,
			       bn->rx_copybreak,
			       bn->netdev->cfg_pending->hds_thresh);
		rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bn->rx_buf_use_size = rx_size;
	bn->rx_buf_size = rx_space;

	bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
	bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bn->tx_ring_size;
	bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
	bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;

	max_rx_cmpl = bn->rx_ring_size;

	/* Each TPA aggregation can also generate a completion entry */
	if (bn->priv_flags & BNGE_NET_EN_TPA)
		max_rx_cmpl += bd->max_tpa_v2;
	/* CP ring must absorb RX (x2), AGG and TX completions */
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
	bn->cp_ring_size = ring_size;

	bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
	if (bn->cp_nr_pages > MAX_CP_PAGES) {
		bn->cp_nr_pages = MAX_CP_PAGES;
		bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
			    ring_size, bn->cp_ring_size);
	}
	bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
	bn->cp_ring_mask = bn->cp_bit - 1;
}
2692 
2693 static void bnge_init_ring_params(struct bnge_net *bn)
2694 {
2695 	u32 rx_size;
2696 
2697 	bn->rx_copybreak = BNGE_DEFAULT_RX_COPYBREAK;
2698 	/* Try to fit 4 chunks into a 4k page */
2699 	rx_size = SZ_1K -
2700 		NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2701 	bn->netdev->cfg->hds_thresh = max(BNGE_DEFAULT_RX_COPYBREAK, rx_size);
2702 }
2703 
/* Allocate and register the net_device for @bd: set up ops, feature flags,
 * MTU limits, default ring sizes and the L2 filter table, then register
 * with the networking core.
 *
 * @bd:       device backing the new netdev
 * @max_irqs: upper bound on IRQ vectors; sizes the TX/RX queue counts
 *
 * Returns 0 on success or a negative errno (the netdev is freed on error).
 */
int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
{
	struct net_device *netdev;
	struct bnge_net *bn;
	int rc;

	netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
				    max_irqs);
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, bd->dev);
	bd->netdev = netdev;

	netdev->netdev_ops = &bnge_netdev_ops;

	bnge_set_ethtool_ops(netdev);

	bn = netdev_priv(netdev);
	bn->netdev = netdev;
	bn->bd = bd;

	netdev->min_mtu = ETH_ZLEN;
	netdev->max_mtu = bd->max_mtu;

	/* Features the user may toggle via ethtool */
	netdev->hw_features = NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GSO_UDP_TUNNEL |
			      NETIF_F_GSO_GRE |
			      NETIF_F_GSO_IPXIP4 |
			      NETIF_F_GSO_UDP_TUNNEL_CSUM |
			      NETIF_F_GSO_GRE_CSUM |
			      NETIF_F_GSO_PARTIAL |
			      NETIF_F_RXHASH |
			      NETIF_F_RXCSUM |
			      NETIF_F_GRO;

	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
		netdev->hw_features |= NETIF_F_GSO_UDP_L4;

	if (BNGE_SUPPORTS_TPA(bd))
		netdev->hw_features |= NETIF_F_LRO;

	/* Features usable on encapsulated (tunneled) packets */
	netdev->hw_enc_features = NETIF_F_IP_CSUM |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_GSO_GRE |
				  NETIF_F_GSO_UDP_TUNNEL_CSUM |
				  NETIF_F_GSO_GRE_CSUM |
				  NETIF_F_GSO_IPXIP4 |
				  NETIF_F_GSO_PARTIAL;

	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;

	netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				       NETIF_F_GSO_GRE_CSUM;

	/* vlan_features snapshot intentionally excludes the VLAN offloads
	 * added to hw_features below.
	 */
	netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
	if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
	if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;

	if (BNGE_SUPPORTS_TPA(bd))
		netdev->hw_features |= NETIF_F_GRO_HW;

	netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;

	/* HW GRO and LRO are mutually exclusive; HW GRO wins by default */
	if (netdev->features & NETIF_F_GRO_HW)
		netdev->features &= ~NETIF_F_LRO;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
	if (bd->tso_max_segs)
		netif_set_tso_max_segs(netdev, bd->tso_max_segs);

	bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
	bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
	bn->rx_dir = DMA_FROM_DEVICE;

	/* TPA flags must be known before ring geometry is computed */
	bnge_set_tpa_flags(bd);
	bnge_init_ring_params(bn);
	bnge_set_ring_params(bd);

	bnge_init_l2_fltr_tbl(bn);
	bnge_init_mac_addr(bd);

	netdev->request_ops_lock = true;
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
		goto err_netdev;
	}

	return 0;

err_netdev:
	free_netdev(netdev);
	return rc;
}
2812 
2813 void bnge_netdev_free(struct bnge_dev *bd)
2814 {
2815 	struct net_device *netdev = bd->netdev;
2816 
2817 	unregister_netdev(netdev);
2818 	free_netdev(netdev);
2819 	bd->netdev = NULL;
2820 }
2821