xref: /linux/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2025 Broadcom.
3 
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
21 
22 #include "bnge.h"
23 #include "bnge_hwrm_lib.h"
24 #include "bnge_ethtool.h"
25 #include "bnge_rmem.h"
26 #include "bnge_txrx.h"
27 
28 #define BNGE_RING_TO_TC_OFF(bd, tx)	\
29 	((tx) % (bd)->tx_nr_rings_per_tc)
30 
31 #define BNGE_RING_TO_TC(bd, tx)		\
32 	((tx) / (bd)->tx_nr_rings_per_tc)
33 
34 #define BNGE_TC_TO_RING_BASE(bd, tc)	\
35 	((tc) * (bd)->tx_nr_rings_per_tc)
36 
37 static void bnge_free_stats_mem(struct bnge_net *bn,
38 				struct bnge_stats_mem *stats)
39 {
40 	struct bnge_dev *bd = bn->bd;
41 
42 	if (stats->hw_stats) {
43 		dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
44 				  stats->hw_stats_map);
45 		stats->hw_stats = NULL;
46 	}
47 }
48 
49 static int bnge_alloc_stats_mem(struct bnge_net *bn,
50 				struct bnge_stats_mem *stats)
51 {
52 	struct bnge_dev *bd = bn->bd;
53 
54 	stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
55 					     &stats->hw_stats_map, GFP_KERNEL);
56 	if (!stats->hw_stats)
57 		return -ENOMEM;
58 
59 	return 0;
60 }
61 
62 static void bnge_free_ring_stats(struct bnge_net *bn)
63 {
64 	struct bnge_dev *bd = bn->bd;
65 	int i;
66 
67 	if (!bn->bnapi)
68 		return;
69 
70 	for (i = 0; i < bd->nq_nr_rings; i++) {
71 		struct bnge_napi *bnapi = bn->bnapi[i];
72 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
73 
74 		bnge_free_stats_mem(bn, &nqr->stats);
75 	}
76 }
77 
78 static int bnge_alloc_ring_stats(struct bnge_net *bn)
79 {
80 	struct bnge_dev *bd = bn->bd;
81 	u32 size, i;
82 	int rc;
83 
84 	size = bd->hw_ring_stats_size;
85 
86 	for (i = 0; i < bd->nq_nr_rings; i++) {
87 		struct bnge_napi *bnapi = bn->bnapi[i];
88 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
89 
90 		nqr->stats.len = size;
91 		rc = bnge_alloc_stats_mem(bn, &nqr->stats);
92 		if (rc)
93 			goto err_free_ring_stats;
94 
95 		nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
96 	}
97 	return 0;
98 
99 err_free_ring_stats:
100 	bnge_free_ring_stats(bn);
101 	return rc;
102 }
103 
104 static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
105 {
106 	struct bnge_ring_struct *ring = &nqr->ring_struct;
107 
108 	kfree(nqr->desc_ring);
109 	nqr->desc_ring = NULL;
110 	ring->ring_mem.pg_arr = NULL;
111 	kfree(nqr->desc_mapping);
112 	nqr->desc_mapping = NULL;
113 	ring->ring_mem.dma_arr = NULL;
114 }
115 
116 static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
117 {
118 	struct bnge_ring_struct *ring = &cpr->ring_struct;
119 
120 	kfree(cpr->desc_ring);
121 	cpr->desc_ring = NULL;
122 	ring->ring_mem.pg_arr = NULL;
123 	kfree(cpr->desc_mapping);
124 	cpr->desc_mapping = NULL;
125 	ring->ring_mem.dma_arr = NULL;
126 }
127 
128 static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
129 {
130 	nqr->desc_ring = kzalloc_objs(*nqr->desc_ring, n);
131 	if (!nqr->desc_ring)
132 		return -ENOMEM;
133 
134 	nqr->desc_mapping = kzalloc_objs(*nqr->desc_mapping, n);
135 	if (!nqr->desc_mapping)
136 		goto err_free_desc_ring;
137 	return 0;
138 
139 err_free_desc_ring:
140 	kfree(nqr->desc_ring);
141 	nqr->desc_ring = NULL;
142 	return -ENOMEM;
143 }
144 
145 static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
146 {
147 	cpr->desc_ring = kzalloc_objs(*cpr->desc_ring, n);
148 	if (!cpr->desc_ring)
149 		return -ENOMEM;
150 
151 	cpr->desc_mapping = kzalloc_objs(*cpr->desc_mapping, n);
152 	if (!cpr->desc_mapping)
153 		goto err_free_desc_ring;
154 	return 0;
155 
156 err_free_desc_ring:
157 	kfree(cpr->desc_ring);
158 	cpr->desc_ring = NULL;
159 	return -ENOMEM;
160 }
161 
162 static void bnge_free_nq_arrays(struct bnge_net *bn)
163 {
164 	struct bnge_dev *bd = bn->bd;
165 	int i;
166 
167 	for (i = 0; i < bd->nq_nr_rings; i++) {
168 		struct bnge_napi *bnapi = bn->bnapi[i];
169 
170 		bnge_free_nq_desc_arr(&bnapi->nq_ring);
171 	}
172 }
173 
174 static int bnge_alloc_nq_arrays(struct bnge_net *bn)
175 {
176 	struct bnge_dev *bd = bn->bd;
177 	int i, rc;
178 
179 	for (i = 0; i < bd->nq_nr_rings; i++) {
180 		struct bnge_napi *bnapi = bn->bnapi[i];
181 
182 		rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
183 		if (rc)
184 			goto err_free_nq_arrays;
185 	}
186 	return 0;
187 
188 err_free_nq_arrays:
189 	bnge_free_nq_arrays(bn);
190 	return rc;
191 }
192 
/* Free the NQ (notification queue) ring memory for every NAPI instance,
 * plus each NQ's child completion rings and their descriptor arrays.
 * Safe to call on a partially built tree: cp_ring_arr may be NULL.
 */
static void bnge_free_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		nqr = &bnapi->nq_ring;
		ring = &nqr->ring_struct;

		bnge_free_ring(bd, &ring->ring_mem);

		/* Completion ring array may not have been allocated yet */
		if (!nqr->cp_ring_arr)
			continue;

		for (j = 0; j < nqr->cp_ring_count; j++) {
			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];

			ring = &cpr->ring_struct;
			bnge_free_ring(bd, &ring->ring_mem);
			bnge_free_cp_desc_arr(cpr);
		}
		kfree(nqr->cp_ring_arr);
		nqr->cp_ring_arr = NULL;
		nqr->cp_ring_count = 0;
	}
}
224 
225 static int alloc_one_cp_ring(struct bnge_net *bn,
226 			     struct bnge_cp_ring_info *cpr)
227 {
228 	struct bnge_ring_mem_info *rmem;
229 	struct bnge_ring_struct *ring;
230 	struct bnge_dev *bd = bn->bd;
231 	int rc;
232 
233 	rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
234 	if (rc)
235 		return -ENOMEM;
236 	ring = &cpr->ring_struct;
237 	rmem = &ring->ring_mem;
238 	rmem->nr_pages = bn->cp_nr_pages;
239 	rmem->page_size = HW_CMPD_RING_SIZE;
240 	rmem->pg_arr = (void **)cpr->desc_ring;
241 	rmem->dma_arr = cpr->desc_mapping;
242 	rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
243 	rc = bnge_alloc_ring(bd, rmem);
244 	if (rc)
245 		goto err_free_cp_desc_arr;
246 	return rc;
247 
248 err_free_cp_desc_arr:
249 	bnge_free_cp_desc_arr(cpr);
250 	return rc;
251 }
252 
/* Build the NQ tree: allocate the NQ ring memory for each NAPI instance
 * and its child completion rings — one RX completion ring when the NQ
 * services an RX ring, plus one TX completion ring per traffic class
 * when it services TX.  Each completion ring is linked back to the
 * bn->rx_ring[] / bn->tx_ring[] entry it serves.
 */
static int bnge_alloc_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, ulp_msix, rc;
	int tcs = 1;

	ulp_msix = bnge_aux_get_msix(bd);
	for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
		bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		int cp_count = 0, k;
		int rx = 0, tx = 0;

		nqr = &bnapi->nq_ring;
		nqr->bnapi = bnapi;
		ring = &nqr->ring_struct;

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_nq_tree;

		/* MSI-X vectors below ulp_msix belong to the aux device */
		ring->map_idx = ulp_msix + i;

		if (i < bd->rx_nr_rings) {
			cp_count++;
			rx = 1;
		}

		/* Shared channels: the first tx_nr_rings NQs also carry TX.
		 * Separate channels: TX-only NQs follow the RX NQs.
		 */
		if ((sh && i < bd->tx_nr_rings) ||
		    (!sh && i >= bd->rx_nr_rings)) {
			cp_count += tcs;
			tx = 1;
		}

		nqr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
		if (!nqr->cp_ring_arr) {
			rc = -ENOMEM;
			goto err_free_nq_tree;
		}

		nqr->cp_ring_count = cp_count;

		for (k = 0; k < cp_count; k++) {
			cpr = &nqr->cp_ring_arr[k];
			rc = alloc_one_cp_ring(bn, cpr);
			if (rc)
				goto err_free_nq_tree;

			cpr->bnapi = bnapi;
			cpr->cp_idx = k;
			if (!k && rx) {
				/* Slot 0 is the RX completion ring */
				bn->rx_ring[i].rx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
			} else {
				/* Remaining slots: one TX ring per TC;
				 * j counts TX-carrying NQs seen so far.
				 */
				int n, tc = k - rx;

				n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
				bn->tx_ring[n].tx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
			}
		}
		if (tx)
			j++;
	}
	return 0;

err_free_nq_tree:
	bnge_free_nq_tree(bn);
	return rc;
}
326 
327 static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
328 {
329 	return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
330 }
331 
332 static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
333 				       struct bnge_rx_ring_info *rxr)
334 {
335 	int i, max_idx;
336 
337 	if (!rxr->rx_buf_ring)
338 		return;
339 
340 	max_idx = bn->rx_nr_pages * RX_DESC_CNT;
341 
342 	for (i = 0; i < max_idx; i++) {
343 		struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
344 		void *data = rx_buf->data;
345 
346 		if (!data)
347 			continue;
348 
349 		rx_buf->data = NULL;
350 		page_pool_free_va(rxr->head_pool, data, true);
351 	}
352 }
353 
354 static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
355 					struct bnge_rx_ring_info *rxr)
356 {
357 	int i, max_idx;
358 
359 	if (!rxr->rx_agg_buf_ring)
360 		return;
361 
362 	max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
363 
364 	for (i = 0; i < max_idx; i++) {
365 		struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
366 		netmem_ref netmem = rx_agg_buf->netmem;
367 
368 		if (!netmem)
369 			continue;
370 
371 		rx_agg_buf->netmem = 0;
372 		__clear_bit(i, rxr->rx_agg_bmap);
373 
374 		page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
375 	}
376 }
377 
378 static void bnge_free_one_tpa_info_data(struct bnge_net *bn,
379 					struct bnge_rx_ring_info *rxr)
380 {
381 	int i;
382 
383 	for (i = 0; i < bn->max_tpa; i++) {
384 		struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i];
385 		u8 *data = tpa_info->data;
386 
387 		if (!data)
388 			continue;
389 
390 		tpa_info->data = NULL;
391 		page_pool_free_va(rxr->head_pool, data, false);
392 	}
393 }
394 
395 static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
396 					    struct bnge_rx_ring_info *rxr)
397 {
398 	struct bnge_tpa_idx_map *map;
399 
400 	if (rxr->rx_tpa)
401 		bnge_free_one_tpa_info_data(bn, rxr);
402 
403 	bnge_free_one_rx_ring_bufs(bn, rxr);
404 	bnge_free_one_agg_ring_bufs(bn, rxr);
405 
406 	map = rxr->rx_tpa_idx_map;
407 	if (map)
408 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
409 }
410 
411 static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
412 {
413 	struct bnge_dev *bd = bn->bd;
414 	int i;
415 
416 	if (!bn->rx_ring)
417 		return;
418 
419 	for (i = 0; i < bd->rx_nr_rings; i++)
420 		bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
421 }
422 
/* Unmap and free every skb still queued on the TX rings, then reset the
 * corresponding netdev TX queue accounting.  Walks the software buffer
 * ring in the same layout the TX path uses: a head entry, one skipped
 * entry, then one entry per skb fragment.
 */
static void bnge_free_tx_skbs(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	u16 max_idx;
	int i;

	max_idx = bn->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		int j;

		/* Ring may not have been allocated (partial bring-up) */
		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < max_idx;) {
			struct bnge_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb;
			int k, last;

			skb = tx_buf->skb;
			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			/* Head BD maps the linear part of the skb */
			dma_unmap_single(bd->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			/* Skip 2 entries past the head (the +2 stride mirrors
			 * the TX send path's BD layout — NOTE(review): confirm
			 * against bnge_txrx.c), then unmap one entry per frag.
			 */
			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bn->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(bd->dev,
					       dma_unmap_addr(tx_buf, mapping),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bd->netdev, i));
	}
}
472 
/* Drop all buffers posted to the RX/agg/TPA rings and the TX rings.
 * Ring memory itself is released separately via bnge_free_core().
 */
static void bnge_free_all_rings_bufs(struct bnge_net *bn)
{
	bnge_free_rx_ring_pair_bufs(bn);
	bnge_free_tx_skbs(bn);
}
478 
479 static void bnge_free_tpa_info(struct bnge_net *bn)
480 {
481 	struct bnge_dev *bd = bn->bd;
482 	int i, j;
483 
484 	for (i = 0; i < bd->rx_nr_rings; i++) {
485 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
486 
487 		kfree(rxr->rx_tpa_idx_map);
488 		rxr->rx_tpa_idx_map = NULL;
489 		if (rxr->rx_tpa) {
490 			for (j = 0; j < bn->max_tpa; j++) {
491 				kfree(rxr->rx_tpa[j].agg_arr);
492 				rxr->rx_tpa[j].agg_arr = NULL;
493 			}
494 		}
495 		kfree(rxr->rx_tpa);
496 		rxr->rx_tpa = NULL;
497 	}
498 }
499 
/* Allocate TPA (LRO/GRO-HW aggregation) bookkeeping for every RX ring:
 * the per-connection info array, a MAX_SKB_FRAGS agg descriptor array
 * per connection, and the agg-index map.  No-op when the device does
 * not support TPA v2.  Returns 0 or -ENOMEM (everything is unwound on
 * failure).
 */
static int bnge_alloc_tpa_info(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j;

	if (!bd->max_tpa_v2)
		return 0;

	/* NOTE(review): max_t() takes the LARGER of the device limit and
	 * MAX_TPA — verify a cap (min_t) was not intended here.
	 */
	bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA);
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_tpa = kzalloc_objs(struct bnge_tpa_info, bn->max_tpa,
					   GFP_KERNEL);
		if (!rxr->rx_tpa)
			goto err_free_tpa_info;

		for (j = 0; j < bn->max_tpa; j++) {
			struct rx_agg_cmp *agg;

			agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
			if (!agg)
				goto err_free_tpa_info;
			rxr->rx_tpa[j].agg_arr = agg;
		}
		rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map,
						  GFP_KERNEL);
		if (!rxr->rx_tpa_idx_map)
			goto err_free_tpa_info;
	}
	return 0;

err_free_tpa_info:
	bnge_free_tpa_info(bn);
	return -ENOMEM;
}
536 
537 static void bnge_free_rx_rings(struct bnge_net *bn)
538 {
539 	struct bnge_dev *bd = bn->bd;
540 	int i;
541 
542 	bnge_free_tpa_info(bn);
543 	for (i = 0; i < bd->rx_nr_rings; i++) {
544 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
545 		struct bnge_ring_struct *ring;
546 
547 		page_pool_destroy(rxr->page_pool);
548 		page_pool_destroy(rxr->head_pool);
549 		rxr->page_pool = rxr->head_pool = NULL;
550 
551 		kfree(rxr->rx_agg_bmap);
552 		rxr->rx_agg_bmap = NULL;
553 
554 		ring = &rxr->rx_ring_struct;
555 		bnge_free_ring(bd, &ring->ring_mem);
556 
557 		ring = &rxr->rx_agg_ring_struct;
558 		bnge_free_ring(bd, &ring->ring_mem);
559 	}
560 }
561 
/* Create the page pool(s) for one RX ring on the given NUMA node.
 * The main pool backs the aggregation ring (and may hand out
 * unreadable netmem for zero-copy providers).  When head buffers
 * cannot share it — unreadable netmem, or PAGE_SIZE larger than
 * BNGE_RX_PAGE_SIZE — a second, smaller pool is created for them;
 * otherwise the same pool is reference-counted and reused.
 */
static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr,
				   int numa_node)
{
	/* Scale pool sizes by how many HW buffers fit in one host page */
	const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
	struct page_pool_params pp = { 0 };
	struct bnge_dev *bd = bn->bd;
	struct page_pool *pool;

	pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
	pp.nid = numa_node;
	pp.netdev = bn->netdev;
	pp.dev = bd->dev;
	pp.dma_dir = bn->rx_dir;
	pp.max_len = PAGE_SIZE;
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
	pp.queue_idx = rxr->bnapi->index;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);
	rxr->page_pool = pool;

	rxr->need_head_pool = page_pool_is_unreadable(pool);
	if (bnge_separate_head_pool(rxr)) {
		/* Head pool is readable-only; cap its size at 1024 */
		pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pool = page_pool_create(&pp);
		if (IS_ERR(pool))
			goto err_destroy_pp;
	} else {
		/* Share the agg pool; take a ref so both destroys balance */
		page_pool_get(pool);
	}
	rxr->head_pool = pool;
	return 0;

err_destroy_pp:
	page_pool_destroy(rxr->page_pool);
	rxr->page_pool = NULL;
	return PTR_ERR(pool);
}
605 
/* Allow both RX pools to recycle pages directly in this ring's NAPI
 * context (lockless fast path).
 */
static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
{
	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
}
611 
612 static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
613 				  struct bnge_rx_ring_info *rxr)
614 {
615 	u16 mem_size;
616 
617 	rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
618 	mem_size = rxr->rx_agg_bmap_size / 8;
619 	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
620 	if (!rxr->rx_agg_bmap)
621 		return -ENOMEM;
622 
623 	return 0;
624 }
625 
/* Allocate every RX ring: page pools (spread across NUMA nodes near the
 * device), RX ring memory, and — when the device needs aggregation
 * buffers — the agg ring memory and tracking bitmap.  TPA state is
 * allocated last when the TPA priv flag is set.  Unwinds fully on
 * failure.
 */
static int bnge_alloc_rx_rings(struct bnge_net *bn)
{
	int i, rc = 0, agg_rings = 0, cpu;
	struct bnge_dev *bd = bn->bd;

	if (bnge_is_agg_reqd(bd))
		agg_rings = 1;

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_ring_struct *ring;
		int cpu_node;

		ring = &rxr->rx_ring_struct;

		/* Place each ring's pool on the node of a nearby CPU */
		cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
		cpu_node = cpu_to_node(cpu);
		netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
			   i, cpu_node);
		rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
		if (rc)
			goto err_free_rx_rings;
		bnge_enable_rx_page_pool(rxr);

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_rx_rings;

		ring->grp_idx = i;
		if (agg_rings) {
			ring = &rxr->rx_agg_ring_struct;
			rc = bnge_alloc_ring(bd, &ring->ring_mem);
			if (rc)
				goto err_free_rx_rings;

			ring->grp_idx = i;
			rc = bnge_alloc_rx_agg_bmap(bn, rxr);
			if (rc)
				goto err_free_rx_rings;
		}
	}

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_alloc_tpa_info(bn);
		if (rc)
			goto err_free_rx_rings;
	}
	return rc;

err_free_rx_rings:
	bnge_free_rx_rings(bn);
	return rc;
}
679 
680 static void bnge_free_tx_rings(struct bnge_net *bn)
681 {
682 	struct bnge_dev *bd = bn->bd;
683 	int i;
684 
685 	for (i = 0; i < bd->tx_nr_rings; i++) {
686 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
687 		struct bnge_ring_struct *ring;
688 
689 		ring = &txr->tx_ring_struct;
690 
691 		bnge_free_ring(bd, &ring->ring_mem);
692 	}
693 }
694 
/* Allocate ring memory for every TX ring and assign each ring the
 * hardware queue id of its traffic class.  j walks the TCs: it advances
 * after the last ring of each TC (rings are laid out per-TC, with
 * tx_nr_rings_per_tc rings per class).
 */
static int bnge_alloc_tx_rings(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, rc;

	for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_tx_rings;

		ring->grp_idx = txr->bnapi->index;
		/* Map this TC to its hardware queue id */
		qidx = bd->tc_to_qidx[j];
		ring->queue_id = bd->q_info[qidx].queue_id;
		if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;

err_free_tx_rings:
	bnge_free_tx_rings(bn);
	return rc;
}
723 
724 static void bnge_free_vnic_attributes(struct bnge_net *bn)
725 {
726 	struct pci_dev *pdev = bn->bd->pdev;
727 	struct bnge_vnic_info *vnic;
728 	int i;
729 
730 	if (!bn->vnic_info)
731 		return;
732 
733 	for (i = 0; i < bn->nr_vnics; i++) {
734 		vnic = &bn->vnic_info[i];
735 
736 		kfree(vnic->uc_list);
737 		vnic->uc_list = NULL;
738 
739 		if (vnic->mc_list) {
740 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
741 					  vnic->mc_list, vnic->mc_list_mapping);
742 			vnic->mc_list = NULL;
743 		}
744 
745 		if (vnic->rss_table) {
746 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
747 					  vnic->rss_table,
748 					  vnic->rss_table_dma_addr);
749 			vnic->rss_table = NULL;
750 		}
751 
752 		vnic->rss_hash_key = NULL;
753 		vnic->flags = 0;
754 	}
755 }
756 
/* Allocate per-VNIC resources according to each VNIC's flags: the UC
 * address list (kmalloc, not zeroed — fully overwritten before use),
 * the MC address DMA buffer, and one DMA block holding the RSS table
 * followed by the RSS hash key.  Unwinds fully on failure.
 */
static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_vnic_info *vnic;
	int i, size;

	for (i = 0; i < bn->nr_vnics; i++) {
		vnic = &bn->vnic_info[i];

		if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
			/* First UC slot is the permanent MAC, stored elsewhere */
			int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;

			vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
			if (!vnic->uc_list)
				goto err_free_vnic_attributes;
		}

		if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(bd->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list)
				goto err_free_vnic_attributes;
		}

		/* Allocate rss table and hash key */
		size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);

		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
		vnic->rss_table = dma_alloc_coherent(bd->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table)
			goto err_free_vnic_attributes;

		/* Hash key lives directly after the (aligned) RSS table */
		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

err_free_vnic_attributes:
	bnge_free_vnic_attributes(bn);
	return -ENOMEM;
}
805 
806 static int bnge_alloc_vnics(struct bnge_net *bn)
807 {
808 	int num_vnics;
809 
810 	/* Allocate only 1 VNIC for now
811 	 * Additional VNICs will be added based on RFS/NTUPLE in future patches
812 	 */
813 	num_vnics = 1;
814 
815 	bn->vnic_info = kzalloc_objs(struct bnge_vnic_info, num_vnics,
816 				     GFP_KERNEL);
817 	if (!bn->vnic_info)
818 		return -ENOMEM;
819 
820 	bn->nr_vnics = num_vnics;
821 
822 	return 0;
823 }
824 
/* Free the VNIC info array allocated by bnge_alloc_vnics(). */
static void bnge_free_vnics(struct bnge_net *bn)
{
	kfree(bn->vnic_info);
	bn->vnic_info = NULL;
	bn->nr_vnics = 0;
}
831 
/* Free the ring group array allocated by bnge_init_ring_grps(). */
static void bnge_free_ring_grps(struct bnge_net *bn)
{
	kfree(bn->grp_info);
	bn->grp_info = NULL;
}
837 
838 static int bnge_init_ring_grps(struct bnge_net *bn)
839 {
840 	struct bnge_dev *bd = bn->bd;
841 	int i;
842 
843 	bn->grp_info = kzalloc_objs(struct bnge_ring_grp_info, bd->nq_nr_rings,
844 				    GFP_KERNEL);
845 	if (!bn->grp_info)
846 		return -ENOMEM;
847 	for (i = 0; i < bd->nq_nr_rings; i++) {
848 		bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
849 		bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
850 		bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
851 		bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
852 		bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
853 	}
854 
855 	return 0;
856 }
857 
/* Tear down everything bnge_alloc_core() built, in reverse order of
 * allocation.  Safe on a partially constructed bnge_net: every helper
 * tolerates NULL/empty state.
 */
static void bnge_free_core(struct bnge_net *bn)
{
	bnge_free_vnic_attributes(bn);
	bnge_free_tx_rings(bn);
	bnge_free_rx_rings(bn);
	bnge_free_nq_tree(bn);
	bnge_free_nq_arrays(bn);
	bnge_free_ring_stats(bn);
	bnge_free_ring_grps(bn);
	bnge_free_vnics(bn);
	kfree(bn->tx_ring_map);
	bn->tx_ring_map = NULL;
	kfree(bn->tx_ring);
	bn->tx_ring = NULL;
	kfree(bn->rx_ring);
	bn->rx_ring = NULL;
	kfree(bn->bnapi);
	bn->bnapi = NULL;
}
877 
/* Allocate the core per-netdev data structures: the bnapi array (one
 * instance per NQ), the RX/TX ring info arrays, stats buffers, VNICs,
 * NQ descriptor arrays, ring memory and the NQ/CP tree.  Any failure
 * unwinds everything through bnge_free_core().
 */
static int bnge_alloc_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, size, arr_size;
	int rc = -ENOMEM;
	void *bnapi;

	/* One allocation holds the bnge_napi pointer array followed by
	 * the cache-line-aligned bnge_napi structs themselves.
	 */
	arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
			bd->nq_nr_rings);
	size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
	bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
	if (!bnapi)
		return rc;

	bn->bnapi = bnapi;
	bnapi += arr_size;
	for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
		struct bnge_nq_ring_info *nqr;

		bn->bnapi[i] = bnapi;
		bn->bnapi[i]->index = i;
		bn->bnapi[i]->bn = bn;
		nqr = &bn->bnapi[i]->nq_ring;
		nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
	}

	bn->rx_ring = kzalloc_objs(struct bnge_rx_ring_info, bd->rx_nr_rings,
				   GFP_KERNEL);
	if (!bn->rx_ring)
		goto err_free_core;

	/* RX ring i is serviced by NQ/NAPI i */
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		rxr->rx_agg_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		rxr->bnapi = bn->bnapi[i];
		bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
	}

	bn->tx_ring = kzalloc_objs(struct bnge_tx_ring_info, bd->tx_nr_rings,
				   GFP_KERNEL);
	if (!bn->tx_ring)
		goto err_free_core;

	bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
				  GFP_KERNEL);
	if (!bn->tx_ring_map)
		goto err_free_core;

	/* j is the first NQ index that carries TX: shared channels start
	 * at 0 (RX and TX share NQs), otherwise TX NQs follow the RX NQs.
	 */
	if (bd->flags & BNGE_EN_SHARED_CHNL)
		j = 0;
	else
		j = bd->rx_nr_rings;

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_napi *bnapi2;
		int k;

		txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
		bn->tx_ring_map[i] = i;
		/* NQ index for this ring: base + offset within its TC */
		k = j + BNGE_RING_TO_TC_OFF(bd, i);

		bnapi2 = bn->bnapi[k];
		txr->txq_index = i;
		txr->tx_napi_idx =
			BNGE_RING_TO_TC(bd, txr->txq_index);
		bnapi2->tx_ring[txr->tx_napi_idx] = txr;
		txr->bnapi = bnapi2;
	}

	rc = bnge_alloc_ring_stats(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_vnics(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_arrays(bn);
	if (rc)
		goto err_free_core;

	bnge_init_ring_struct(bn);

	rc = bnge_alloc_rx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_tx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_tree(bn);
	if (rc)
		goto err_free_core;

	/* The default VNIC handles RSS plus UC/MC filtering */
	bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
						  BNGE_VNIC_MCAST_FLAG |
						  BNGE_VNIC_UCAST_FLAG;
	rc = bnge_alloc_vnic_attributes(bn);
	if (rc)
		goto err_free_core;
	return 0;

err_free_core:
	bnge_free_core(bn);
	return rc;
}
990 
/* Return the firmware ring id of the completion ring serving this RX ring. */
u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
{
	return rxr->rx_cpr->ring_struct.fw_ring_id;
}
995 
/* Return the firmware ring id of the completion ring serving this TX ring. */
u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
{
	return txr->tx_cpr->ring_struct.fw_ring_id;
}
1000 
/* Write the NQ doorbell with the ARM bit so the NIC raises the next
 * notification at consumer index @idx.
 */
static void bnge_db_nq_arm(struct bnge_net *bn,
			   struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_ARM |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1007 
/* Acknowledge NQ progress up to @idx while keeping notifications masked. */
static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1013 
/* Acknowledge completion ring progress up to @idx and re-arm it. */
static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1019 
1020 static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
1021 {
1022 	struct bnge_napi *bnapi = bn->bnapi[n];
1023 	struct bnge_nq_ring_info *nqr;
1024 
1025 	nqr = &bnapi->nq_ring;
1026 
1027 	return nqr->ring_struct.map_idx;
1028 }
1029 
1030 static void bnge_init_nq_tree(struct bnge_net *bn)
1031 {
1032 	struct bnge_dev *bd = bn->bd;
1033 	int i, j;
1034 
1035 	for (i = 0; i < bd->nq_nr_rings; i++) {
1036 		struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
1037 		struct bnge_ring_struct *ring = &nqr->ring_struct;
1038 
1039 		ring->fw_ring_id = INVALID_HW_RING_ID;
1040 		for (j = 0; j < nqr->cp_ring_count; j++) {
1041 			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
1042 
1043 			ring = &cpr->ring_struct;
1044 			ring->fw_ring_id = INVALID_HW_RING_ID;
1045 		}
1046 	}
1047 }
1048 
/* Allocate one RX aggregation buffer from the ring's page pool.
 * When the host page is larger than BNGE_RX_PAGE_SIZE the page is
 * fragmented and *offset locates the fragment; otherwise a full page
 * is handed out at offset 0.  On success *mapping holds the buffer's
 * DMA address; returns 0 (null netmem) on failure.
 */
static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
					 dma_addr_t *mapping,
					 struct bnge_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	netmem_ref netmem;

	if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
						     BNGE_RX_PAGE_SIZE, gfp);
	} else {
		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
		*offset = 0;
	}
	if (!netmem)
		return 0;

	/* Page pool already DMA-mapped the page; add the frag offset */
	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
}
1070 
/* Allocate one RX head buffer fragment from the head page pool.
 * Returns the buffer's virtual address, or NULL on failure.  On
 * success *mapping is the DMA address advanced by bn->rx_dma_offset
 * (the address the chip should DMA the packet payload to).
 */
u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
			 struct bnge_rx_ring_info *rxr,
			 gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(rxr->head_pool, &offset,
				    bn->rx_buf_size, gfp);
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
	return page_address(page) + offset;
}
1086 
/* Allocate one RX head buffer and post it at producer index @prod:
 * record it in the software ring and write its DMA address into the
 * corresponding hardware descriptor.  Returns 0 or -ENOMEM.
 */
int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
	struct rx_bd *rxbd;
	dma_addr_t mapping;
	u8 *data;

	rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
	data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	/* data_ptr skips the headroom reserved before the packet */
	rx_buf->data_ptr = data + bn->rx_offset;
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}
1108 
/* Fill one RX ring with head buffers.  A partially filled ring is
 * tolerated (logged as a warning); the hard failure is only when not a
 * single buffer could be allocated.
 */
static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr,
				       int ring_nr)
{
	u32 prod = rxr->rx_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_ring_size; i++) {
		rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX(prod);
	}

	/* Abort if not a single buffer can be allocated */
	if (rc && !i) {
		netdev_err(bn->netdev,
			   "RX ring %d: allocated %d/%d buffers, abort\n",
			   ring_nr, i, bn->rx_ring_size);
		return rc;
	}

	rxr->rx_prod = prod;

	if (i < bn->rx_ring_size)
		netdev_warn(bn->netdev,
			    "RX ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_ring_size);
	return 0;
}
1139 
1140 u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
1141 {
1142 	u16 next, max = rxr->rx_agg_bmap_size;
1143 
1144 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1145 	if (next >= max)
1146 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1147 	return next;
1148 }
1149 
/* Allocate one aggregation buffer and post it at hardware producer
 * index @prod.  The software slot (sw_prod) is tracked separately via
 * the agg bitmap, since completions can free slots out of order; the
 * chosen slot is echoed back by the chip through rx_bd_opaque.
 */
int bnge_alloc_rx_netmem(struct bnge_net *bn,
			 struct bnge_rx_ring_info *rxr,
			 u16 prod, gfp_t gfp)
{
	struct bnge_sw_rx_agg_bd *rx_agg_buf;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;
	struct rx_bd *rxbd;
	dma_addr_t mapping;
	netmem_ref netmem;

	rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
	netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
	if (!netmem)
		return -ENOMEM;

	/* Slot already in use (out-of-order completion): pick a free one */
	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->netmem = netmem;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
1180 
/* Fill one aggregation ring with buffers.  A partial fill is accepted
 * as long as at least MAX_SKB_FRAGS buffers were posted (enough for one
 * fully fragmented packet); below that everything is freed and -ENOMEM
 * returned.
 */
static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr,
					int ring_nr)
{
	u32 prod = rxr->rx_agg_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_agg_ring_size; i++) {
		rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX_AGG(prod);
	}

	if (rc && i < MAX_SKB_FRAGS) {
		netdev_err(bn->netdev,
			   "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
			   ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
		goto err_free_one_agg_ring_bufs;
	}

	rxr->rx_agg_prod = prod;

	if (i < bn->rx_agg_ring_size)
		netdev_warn(bn->netdev,
			    "Agg ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_agg_ring_size);
	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
	return -ENOMEM;
}
1214 
1215 static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn,
1216 					struct bnge_rx_ring_info *rxr)
1217 {
1218 	dma_addr_t mapping;
1219 	u8 *data;
1220 	int i;
1221 
1222 	for (i = 0; i < bn->max_tpa; i++) {
1223 		data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
1224 					    GFP_KERNEL);
1225 		if (!data)
1226 			goto err_free_tpa_info_data;
1227 
1228 		rxr->rx_tpa[i].data = data;
1229 		rxr->rx_tpa[i].data_ptr = data + bn->rx_offset;
1230 		rxr->rx_tpa[i].mapping = mapping;
1231 	}
1232 	return 0;
1233 
1234 err_free_tpa_info_data:
1235 	bnge_free_one_tpa_info_data(bn, rxr);
1236 	return -ENOMEM;
1237 }
1238 
/* Populate buffers for one RX ring and, when required, its aggregation
 * ring and TPA buffers.  Unwinds the allocations in reverse order on
 * failure.
 */
static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
{
	struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
	int rc;

	rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
	if (rc)
		return rc;

	/* Aggregation buffers only when the device uses agg rings. */
	if (bnge_is_agg_reqd(bn->bd)) {
		rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
		if (rc)
			goto err_free_one_rx_ring_bufs;
	}

	/* rx_tpa is only allocated when TPA is enabled. */
	if (rxr->rx_tpa) {
		rc = bnge_alloc_one_tpa_info_data(bn, rxr);
		if (rc)
			goto err_free_one_agg_ring_bufs;
	}

	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
err_free_one_rx_ring_bufs:
	bnge_free_one_rx_ring_bufs(bn, rxr);
	return rc;
}
1268 
1269 static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
1270 {
1271 	struct rx_bd **rx_desc_ring;
1272 	u32 prod;
1273 	int i;
1274 
1275 	rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
1276 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
1277 		struct rx_bd *rxbd = rx_desc_ring[i];
1278 		int j;
1279 
1280 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1281 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1282 			rxbd->rx_bd_opaque = prod;
1283 		}
1284 	}
1285 }
1286 
1287 static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
1288 				       struct bnge_rx_ring_info *rxr)
1289 {
1290 	struct bnge_ring_struct *ring;
1291 	u32 type;
1292 
1293 	type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1294 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1295 
1296 	if (NET_IP_ALIGN == 2)
1297 		type |= RX_BD_FLAGS_SOP;
1298 
1299 	ring = &rxr->rx_ring_struct;
1300 	bnge_init_rxbd_pages(ring, type);
1301 	ring->fw_ring_id = INVALID_HW_RING_ID;
1302 }
1303 
1304 static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
1305 					struct bnge_rx_ring_info *rxr)
1306 {
1307 	struct bnge_ring_struct *ring;
1308 	u32 type;
1309 
1310 	ring = &rxr->rx_agg_ring_struct;
1311 	ring->fw_ring_id = INVALID_HW_RING_ID;
1312 	if (bnge_is_agg_reqd(bn->bd)) {
1313 		type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
1314 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1315 
1316 		bnge_init_rxbd_pages(ring, type);
1317 	}
1318 }
1319 
1320 static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
1321 {
1322 	struct bnge_rx_ring_info *rxr;
1323 
1324 	rxr = &bn->rx_ring[ring_nr];
1325 	bnge_init_one_rx_ring_rxbd(bn, rxr);
1326 
1327 	netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
1328 			     &rxr->bnapi->napi);
1329 
1330 	bnge_init_one_agg_ring_rxbd(bn, rxr);
1331 }
1332 
1333 static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
1334 {
1335 	int i, rc;
1336 
1337 	for (i = 0; i < bn->bd->rx_nr_rings; i++) {
1338 		rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
1339 		if (rc)
1340 			goto err_free_rx_ring_pair_bufs;
1341 	}
1342 	return 0;
1343 
1344 err_free_rx_ring_pair_bufs:
1345 	bnge_free_rx_ring_pair_bufs(bn);
1346 	return rc;
1347 }
1348 
/* Set the RX buffer offsets and initialize every RX/agg ring pair. */
static void bnge_init_rx_rings(struct bnge_net *bn)
{
	int i;

/* rx_offset: where packet data begins in the buffer;
 * rx_dma_offset: where DMA begins (NET_IP_ALIGN shifts only the former).
 */
#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
	bn->rx_offset = BNGE_RX_OFFSET;
	bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;

	for (i = 0; i < bn->bd->rx_nr_rings; i++)
		bnge_init_one_rx_ring_pair(bn, i);
}
1361 
1362 static void bnge_init_tx_rings(struct bnge_net *bn)
1363 {
1364 	int i;
1365 
1366 	bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
1367 
1368 	for (i = 0; i < bn->bd->tx_nr_rings; i++) {
1369 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
1370 		struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1371 
1372 		ring->fw_ring_id = INVALID_HW_RING_ID;
1373 
1374 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
1375 				     &txr->bnapi->napi);
1376 	}
1377 }
1378 
/* Initialize the SW state of all VNICs: invalidate FW ids and set up
 * the RSS hash key.  The default VNIC owns the key; all other VNICs
 * copy it.
 */
static void bnge_init_vnics(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	int i;

	for (i = 0; i < bn->nr_vnics; i++) {
		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->vnic_id = i;
		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		if (bn->vnic_info[i].rss_hash_key) {
			if (i == BNGE_VNIC_DEFAULT) {
				u8 *key = (void *)vnic->rss_hash_key;
				int k;

				/* Generate a random key once; keep it
				 * across re-inits unless it was updated
				 * (e.g. by user configuration).
				 */
				if (!bn->rss_hash_key_valid &&
				    !bn->rss_hash_key_updated) {
					get_random_bytes(bn->rss_hash_key,
							 HW_HASH_KEY_SIZE);
					bn->rss_hash_key_updated = true;
				}

				memcpy(vnic->rss_hash_key, bn->rss_hash_key,
				       HW_HASH_KEY_SIZE);

				/* Key unchanged: the cached toeplitz
				 * prefix below is still valid.
				 */
				if (!bn->rss_hash_key_updated)
					continue;

				bn->rss_hash_key_updated = false;
				bn->rss_hash_key_valid = true;

				/* Cache the first 8 key bytes, MSB
				 * first, as the toeplitz prefix.
				 */
				bn->toeplitz_prefix = 0;
				for (k = 0; k < 8; k++) {
					bn->toeplitz_prefix <<= 8;
					bn->toeplitz_prefix |= key[k];
				}
			} else {
				/* Non-default VNICs mirror the default
				 * VNIC's key.
				 */
				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
				       HW_HASH_KEY_SIZE);
			}
		}
	}
}
1426 
/* Program the doorbell ring mask and the derived epoch mask/shift for
 * the given ring type.  Unknown ring types leave db_ring_mask as-is.
 */
static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
			     u32 ring_type)
{
	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		db->db_ring_mask = bn->tx_ring_mask;
		break;
	case HWRM_RING_ALLOC_RX:
		db->db_ring_mask = bn->rx_ring_mask;
		break;
	case HWRM_RING_ALLOC_AGG:
		db->db_ring_mask = bn->rx_agg_ring_mask;
		break;
	case HWRM_RING_ALLOC_CMPL:
	case HWRM_RING_ALLOC_NQ:
		/* Completion and notification queues share cp_ring_mask. */
		db->db_ring_mask = bn->cp_ring_mask;
		break;
	}
	db->db_epoch_mask = db->db_ring_mask + 1;
	db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
}
1448 
1449 static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
1450 			u32 ring_type, u32 map_idx, u32 xid)
1451 {
1452 	struct bnge_dev *bd = bn->bd;
1453 
1454 	switch (ring_type) {
1455 	case HWRM_RING_ALLOC_TX:
1456 		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
1457 		break;
1458 	case HWRM_RING_ALLOC_RX:
1459 	case HWRM_RING_ALLOC_AGG:
1460 		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
1461 		break;
1462 	case HWRM_RING_ALLOC_CMPL:
1463 		db->db_key64 = DBR_PATH_L2;
1464 		break;
1465 	case HWRM_RING_ALLOC_NQ:
1466 		db->db_key64 = DBR_PATH_L2;
1467 		break;
1468 	}
1469 	db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;
1470 
1471 	db->doorbell = bd->bar1 + bd->db_offset;
1472 	bnge_set_db_mask(bn, db, ring_type);
1473 }
1474 
1475 static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
1476 				   struct bnge_cp_ring_info *cpr)
1477 {
1478 	const u32 type = HWRM_RING_ALLOC_CMPL;
1479 	struct bnge_napi *bnapi = cpr->bnapi;
1480 	struct bnge_ring_struct *ring;
1481 	u32 map_idx = bnapi->index;
1482 	int rc;
1483 
1484 	ring = &cpr->ring_struct;
1485 	ring->handle = BNGE_SET_NQ_HDL(cpr);
1486 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1487 	if (rc)
1488 		return rc;
1489 
1490 	bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
1491 	bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
1492 
1493 	return 0;
1494 }
1495 
1496 static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
1497 				   struct bnge_tx_ring_info *txr, u32 tx_idx)
1498 {
1499 	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1500 	const u32 type = HWRM_RING_ALLOC_TX;
1501 	int rc;
1502 
1503 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
1504 	if (rc)
1505 		return rc;
1506 
1507 	bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
1508 
1509 	return 0;
1510 }
1511 
/* Allocate an aggregation ring with the FW and post the pre-filled agg
 * and RX buffers.  The RX producer is written here because posting was
 * deferred until the agg ring exists (see bnge_hwrm_ring_alloc()).
 */
static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
	u32 type = HWRM_RING_ALLOC_AGG;
	struct bnge_dev *bd = bn->bd;
	u32 grp_idx = ring->grp_idx;
	u32 map_idx;
	int rc;

	/* Agg ring doorbells are indexed after all RX rings. */
	map_idx = grp_idx + bd->rx_nr_rings;
	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
		    ring->fw_ring_id);
	bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
	bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
	bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;

	return 0;
}
1535 
1536 static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
1537 				   struct bnge_rx_ring_info *rxr)
1538 {
1539 	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
1540 	struct bnge_napi *bnapi = rxr->bnapi;
1541 	u32 type = HWRM_RING_ALLOC_RX;
1542 	u32 map_idx = bnapi->index;
1543 	int rc;
1544 
1545 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1546 	if (rc)
1547 		return rc;
1548 
1549 	bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
1550 	bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
1551 
1552 	return 0;
1553 }
1554 
/* Allocate all rings with the firmware in dependency order: NQs first,
 * then a completion + TX ring per TX queue, then RX rings with their
 * completion rings, and finally the aggregation rings.  On failure the
 * partially allocated rings are left for the caller to release via
 * bnge_hwrm_ring_free().
 */
static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	bool agg_rings;
	int i, rc = 0;

	agg_rings = !!(bnge_is_agg_reqd(bd));
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
		struct bnge_ring_struct *ring = &nqr->ring_struct;
		u32 type = HWRM_RING_ALLOC_NQ;
		u32 map_idx = ring->map_idx;
		unsigned int vector;

		/* Keep the vector quiet until the NQ doorbell is set up. */
		vector = bd->irq_tbl[map_idx].vector;
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc) {
			enable_irq(vector);
			goto err_out;
		}
		bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
		bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
		enable_irq(vector);
		bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;

		/* The first NQ also carries FW async event notifications. */
		if (!i) {
			rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
			if (rc)
				netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
		}
	}

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];

		rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
		if (rc)
			goto err_out;
		rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
		if (rc)
			goto err_out;
	}

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		struct bnge_napi *bnapi;
		u32 map_idx, type;

		rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
		if (rc)
			goto err_out;
		/* If we have agg rings, post agg buffers first. */
		if (!agg_rings)
			bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);

		cpr = rxr->rx_cpr;
		bnapi = rxr->bnapi;
		type = HWRM_RING_ALLOC_CMPL;
		map_idx = bnapi->index;

		ring = &cpr->ring_struct;
		ring->handle = BNGE_SET_NQ_HDL(cpr);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnge_set_db(bn, &cpr->cp_db, type, map_idx,
			    ring->fw_ring_id);
		bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
	}

	/* Agg ring alloc also posts the deferred RX producers. */
	if (agg_rings) {
		for (i = 0; i < bd->rx_nr_rings; i++) {
			rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
			if (rc)
				goto err_out;
		}
	}
err_out:
	return rc;
}
1639 
1640 void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
1641 {
1642 	__le16 *ring_tbl = vnic->rss_table;
1643 	struct bnge_rx_ring_info *rxr;
1644 	struct bnge_dev *bd = bn->bd;
1645 	u16 tbl_size, i;
1646 
1647 	tbl_size = bnge_get_rxfh_indir_size(bd);
1648 
1649 	for (i = 0; i < tbl_size; i++) {
1650 		u16 ring_id, j;
1651 
1652 		j = bd->rss_indir_tbl[i];
1653 		rxr = &bn->rx_ring[j];
1654 
1655 		ring_id = rxr->rx_ring_struct.fw_ring_id;
1656 		*ring_tbl++ = cpu_to_le16(ring_id);
1657 		ring_id = bnge_cp_ring_for_rx(rxr);
1658 		*ring_tbl++ = cpu_to_le16(ring_id);
1659 	}
1660 }
1661 
/* Program the RSS configuration for @vnic and apply the VNIC config.
 * Failures are logged; the first failing step's code is returned.
 */
static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
				  struct bnge_vnic_info *vnic)
{
	int rc;

	rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
	if (rc) {
		netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic->vnic_id, rc);
		return rc;
	}
	rc = bnge_hwrm_vnic_cfg(bn, vnic);
	if (rc)
		netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
			   vnic->vnic_id, rc);
	return rc;
}
1679 
/* Allocate the RSS contexts for @vnic, configure RSS, and (when agg
 * rings are in use) header-data split.  Note: a context allocation
 * failure is reported as -ENOMEM regardless of the underlying HWRM
 * status; an HDS failure is only logged.
 */
static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	struct bnge_dev *bd = bn->bd;
	int rc, i, nr_ctxs;

	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
				   vnic->vnic_id, i, rc);
			return -ENOMEM;
		}
		/* Track successfully allocated contexts for teardown. */
		bn->rsscos_nr_ctxs++;
	}

	rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
	if (rc)
		return rc;

	if (bnge_is_agg_reqd(bd)) {
		rc = bnge_hwrm_vnic_set_hds(bn, vnic);
		if (rc)
			netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
				   vnic->vnic_id, rc);
	}
	return rc;
}
1708 
/* Drop one reference to @fltr; on the final put, unlink it from the
 * filter hash table and free it after an RCU grace period.
 */
static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
{
	if (!refcount_dec_and_test(&fltr->refcnt))
		return;
	hlist_del_rcu(&fltr->base.hash);
	kfree_rcu(fltr, base.rcu);
}
1716 
1717 static void bnge_init_l2_filter(struct bnge_net *bn,
1718 				struct bnge_l2_filter *fltr,
1719 				struct bnge_l2_key *key, u32 idx)
1720 {
1721 	struct hlist_head *head;
1722 
1723 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
1724 	fltr->l2_key.vlan = key->vlan;
1725 	fltr->base.type = BNGE_FLTR_TYPE_L2;
1726 
1727 	head = &bn->l2_fltr_hash_tbl[idx];
1728 	hlist_add_head_rcu(&fltr->base.hash, head);
1729 	refcount_set(&fltr->refcnt, 1);
1730 }
1731 
1732 static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
1733 						      struct bnge_l2_key *key,
1734 						      u32 idx)
1735 {
1736 	struct bnge_l2_filter *fltr;
1737 	struct hlist_head *head;
1738 
1739 	head = &bn->l2_fltr_hash_tbl[idx];
1740 	hlist_for_each_entry_rcu(fltr, head, base.hash) {
1741 		struct bnge_l2_key *l2_key = &fltr->l2_key;
1742 
1743 		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
1744 		    l2_key->vlan == key->vlan)
1745 			return fltr;
1746 	}
1747 	return NULL;
1748 }
1749 
/* Look up an L2 filter for @key in bucket @idx and take a reference on
 * it under RCU.  The caller releases it with bnge_del_l2_filter().
 * Returns NULL if no matching filter exists.
 */
static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
						    struct bnge_l2_key *key,
						    u32 idx)
{
	struct bnge_l2_filter *fltr;

	rcu_read_lock();
	fltr = __bnge_lookup_l2_filter(bn, key, idx);
	if (fltr)
		refcount_inc(&fltr->refcnt);
	rcu_read_unlock();
	return fltr;
}
1763 
/* Return the existing filter for @key (with an extra reference) or
 * allocate, initialize, and hash a new one.  Returns ERR_PTR(-ENOMEM)
 * on allocation failure.
 */
static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
						   struct bnge_l2_key *key,
						   gfp_t gfp)
{
	struct bnge_l2_filter *fltr;
	u32 idx;

	/* Hash the whole key into a bucket index. */
	idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
	      BNGE_L2_FLTR_HASH_MASK;
	fltr = bnge_lookup_l2_filter(bn, key, idx);
	if (fltr)
		return fltr;

	fltr = kzalloc_obj(*fltr, gfp);
	if (!fltr)
		return ERR_PTR(-ENOMEM);

	bnge_init_l2_filter(bn, fltr, key, idx);
	return fltr;
}
1784 
1785 static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
1786 				     const u8 *mac_addr)
1787 {
1788 	struct bnge_l2_filter *fltr;
1789 	struct bnge_l2_key key;
1790 	int rc;
1791 
1792 	ether_addr_copy(key.dst_mac_addr, mac_addr);
1793 	key.vlan = 0;
1794 	fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
1795 	if (IS_ERR(fltr))
1796 		return PTR_ERR(fltr);
1797 
1798 	fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
1799 	rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
1800 	if (rc)
1801 		goto err_del_l2_filter;
1802 	bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
1803 	return rc;
1804 
1805 err_del_l2_filter:
1806 	bnge_del_l2_filter(bn, fltr);
1807 	return rc;
1808 }
1809 
/* Copy the netdev multicast list into the default VNIC's mc_list.  If
 * the list overflows BNGE_MAX_MC_ADDRS, set ALL_MCAST in @rx_mask and
 * report no update; otherwise set MCAST when any address is present.
 * Returns true if the cached list or its length changed.
 */
static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct netdev_hw_addr *ha;
	int mc_count = 0, off = 0;
	bool update = false;
	u8 *haddr;

	netdev_for_each_mc_addr(ha, dev) {
		if (mc_count >= BNGE_MAX_MC_ADDRS) {
			/* Too many addresses to filter individually. */
			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			vnic->mc_list_count = 0;
			return false;
		}
		haddr = ha->addr;
		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
			update = true;
		}
		off += ETH_ALEN;
		mc_count++;
	}
	if (mc_count)
		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

	if (mc_count != vnic->mc_list_count) {
		vnic->mc_list_count = mc_count;
		update = true;
	}
	return update;
}
1842 
1843 static bool bnge_uc_list_updated(struct bnge_net *bn)
1844 {
1845 	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1846 	struct net_device *dev = bn->netdev;
1847 	struct netdev_hw_addr *ha;
1848 	int off = 0;
1849 
1850 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
1851 		return true;
1852 
1853 	netdev_for_each_uc_addr(ha, dev) {
1854 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
1855 			return true;
1856 
1857 		off += ETH_ALEN;
1858 	}
1859 	return false;
1860 }
1861 
/* Policy hook: promiscuous mode is currently always permitted. */
static bool bnge_promisc_ok(struct bnge_net *bn)
{
	return true;
}
1866 
/* Re-apply the default VNIC's unicast filters and RX mask.  When the
 * netdev UC list changed, the secondary FW L2 filters are rebuilt
 * (slot 0, the primary MAC, is preserved), falling back to promiscuous
 * mode if the list does not fit and to ALL_MCAST if MC filters fail.
 */
static int bnge_cfg_def_vnic(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;
	struct netdev_hw_addr *ha;
	int i, off = 0, rc;
	bool uc_update;

	netif_addr_lock_bh(dev);
	uc_update = bnge_uc_list_updated(bn);
	netif_addr_unlock_bh(dev);

	if (!uc_update)
		goto skip_uc;

	/* Drop all secondary UC filters; slot 0 (primary MAC) stays. */
	for (i = 1; i < vnic->uc_filter_count; i++) {
		struct bnge_l2_filter *fltr = vnic->l2_filters[i];

		bnge_hwrm_l2_filter_free(bd, fltr);
		bnge_del_l2_filter(bn, fltr);
	}

	vnic->uc_filter_count = 1;

	/* Snapshot the UC list under the addr lock. */
	netif_addr_lock_bh(dev);
	if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
		/* More addresses than filters: go promiscuous instead. */
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
			off += ETH_ALEN;
			vnic->uc_filter_count++;
		}
	}
	netif_addr_unlock_bh(dev);

	/* Program the copied addresses into FW filters. */
	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
		rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
		if (rc) {
			netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
			vnic->uc_filter_count = i;
			return rc;
		}
	}

skip_uc:
	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
	    !bnge_promisc_ok(bn))
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
		/* MC filters rejected: retry with ALL_MCAST instead. */
		netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
			    rc);
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
		rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	}
	if (rc)
		netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
			   rc);

	return rc;
}
1932 
1933 static void bnge_disable_int(struct bnge_net *bn)
1934 {
1935 	struct bnge_dev *bd = bn->bd;
1936 	int i;
1937 
1938 	if (!bn->bnapi)
1939 		return;
1940 
1941 	for (i = 0; i < bd->nq_nr_rings; i++) {
1942 		struct bnge_napi *bnapi = bn->bnapi[i];
1943 		struct bnge_nq_ring_info *nqr;
1944 		struct bnge_ring_struct *ring;
1945 
1946 		nqr = &bnapi->nq_ring;
1947 		ring = &nqr->ring_struct;
1948 
1949 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
1950 			bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
1951 	}
1952 }
1953 
1954 static void bnge_disable_int_sync(struct bnge_net *bn)
1955 {
1956 	struct bnge_dev *bd = bn->bd;
1957 	int i;
1958 
1959 	bnge_disable_int(bn);
1960 	for (i = 0; i < bd->nq_nr_rings; i++) {
1961 		int map_idx = bnge_cp_num_to_irq_num(bn, i);
1962 
1963 		synchronize_irq(bd->irq_tbl[map_idx].vector);
1964 	}
1965 }
1966 
1967 static void bnge_enable_int(struct bnge_net *bn)
1968 {
1969 	struct bnge_dev *bd = bn->bd;
1970 	int i;
1971 
1972 	for (i = 0; i < bd->nq_nr_rings; i++) {
1973 		struct bnge_napi *bnapi = bn->bnapi[i];
1974 		struct bnge_nq_ring_info *nqr;
1975 
1976 		nqr = &bnapi->nq_ring;
1977 		bnge_db_nq_arm(bn, &nqr->nq_db, nqr->nq_raw_cons);
1978 	}
1979 }
1980 
/* Disable NAPI on all NQs.  The NAPI_DISABLED state bit makes repeated
 * calls a no-op.  The *_locked NAPI variant is used, so the caller is
 * expected to hold the netdev instance lock.
 */
static void bnge_disable_napi(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (test_and_set_bit(BNGE_STATE_NAPI_DISABLED, &bn->state))
		return;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];

		napi_disable_locked(&bnapi->napi);
	}
}
1995 
1996 static void bnge_enable_napi(struct bnge_net *bn)
1997 {
1998 	struct bnge_dev *bd = bn->bd;
1999 	int i;
2000 
2001 	clear_bit(BNGE_STATE_NAPI_DISABLED, &bn->state);
2002 	for (i = 0; i < bd->nq_nr_rings; i++) {
2003 		struct bnge_napi *bnapi = bn->bnapi[i];
2004 
2005 		bnapi->in_reset = false;
2006 		bnapi->tx_fault = 0;
2007 
2008 		napi_enable_locked(&bnapi->napi);
2009 	}
2010 }
2011 
2012 static void bnge_hwrm_vnic_free(struct bnge_net *bn)
2013 {
2014 	int i;
2015 
2016 	for (i = 0; i < bn->nr_vnics; i++)
2017 		bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
2018 }
2019 
2020 static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
2021 {
2022 	int i, j;
2023 
2024 	for (i = 0; i < bn->nr_vnics; i++) {
2025 		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
2026 
2027 		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
2028 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
2029 				bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
2030 		}
2031 	}
2032 	bn->rsscos_nr_ctxs = 0;
2033 }
2034 
/* Free all unicast L2 filters of the default VNIC, in firmware and in
 * the SW filter table.
 */
static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	int i;

	for (i = 0; i < vnic->uc_filter_count; i++) {
		struct bnge_l2_filter *fltr = vnic->l2_filters[i];

		bnge_hwrm_l2_filter_free(bn->bd, fltr);
		bnge_del_l2_filter(bn, fltr);
	}

	vnic->uc_filter_count = 0;
}
2049 
/* Tear down VNIC state in FW: filters first, then the VNICs, then
 * their RSS contexts.
 */
static void bnge_clear_vnic(struct bnge_net *bn)
{
	bnge_hwrm_clear_vnic_filter(bn);
	bnge_hwrm_vnic_free(bn);
	bnge_hwrm_vnic_ctx_free(bn);
}
2056 
/* Free an RX ring in FW.  When @close_path, the free request is
 * flushed through the ring's completion ring.  No-op if the ring was
 * never allocated.
 */
static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr,
				   bool close_path)
{
	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
	u32 grp_idx = rxr->bnapi->index;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
	hwrm_ring_free_send_msg(bn, ring,
				RING_FREE_REQ_RING_TYPE_RX,
				close_path ? cmpl_ring_id :
				INVALID_HW_RING_ID);
	/* Invalidate both the ring and its ring-group entry. */
	ring->fw_ring_id = INVALID_HW_RING_ID;
	bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
}
2076 
/* Free an aggregation ring in FW, optionally flushing the request
 * through the RX completion ring when @close_path.  No-op if the ring
 * was never allocated.
 */
static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr,
				       bool close_path)
{
	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
	u32 grp_idx = rxr->bnapi->index;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
				close_path ? cmpl_ring_id :
				INVALID_HW_RING_ID);
	/* Invalidate both the ring and its ring-group entry. */
	ring->fw_ring_id = INVALID_HW_RING_ID;
	bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
2095 
2096 static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
2097 				   struct bnge_tx_ring_info *txr,
2098 				   bool close_path)
2099 {
2100 	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
2101 	u32 cmpl_ring_id;
2102 
2103 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
2104 		return;
2105 
2106 	cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
2107 		       INVALID_HW_RING_ID;
2108 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
2109 				cmpl_ring_id);
2110 	ring->fw_ring_id = INVALID_HW_RING_ID;
2111 }
2112 
2113 static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
2114 				   struct bnge_cp_ring_info *cpr)
2115 {
2116 	struct bnge_ring_struct *ring;
2117 
2118 	ring = &cpr->ring_struct;
2119 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
2120 		return;
2121 
2122 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
2123 				INVALID_HW_RING_ID);
2124 	ring->fw_ring_id = INVALID_HW_RING_ID;
2125 }
2126 
/* Free all FW rings in reverse dependency order: TX, RX and agg rings,
 * then completion rings and NQs, quiescing interrupts in between.
 */
static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (!bn->bnapi)
		return;

	for (i = 0; i < bd->tx_nr_rings; i++)
		bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);

	for (i = 0; i < bd->rx_nr_rings; i++) {
		bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
		bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
	}

	/* The completion rings are about to be freed.  After that the
	 * IRQ doorbell will not work anymore.  So we need to disable
	 * IRQ here.
	 */
	bnge_disable_int_sync(bn);

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		/* Free the completion rings hanging off this NQ first. */
		nqr = &bnapi->nq_ring;
		for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
			bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);

		ring = &nqr->ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bn, ring,
						RING_FREE_REQ_RING_TYPE_NQ,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}
2169 
2170 static void bnge_setup_msix(struct bnge_net *bn)
2171 {
2172 	struct net_device *dev = bn->netdev;
2173 	struct bnge_dev *bd = bn->bd;
2174 	int len, i;
2175 
2176 	len = sizeof(bd->irq_tbl[0].name);
2177 	for (i = 0; i < bd->nq_nr_rings; i++) {
2178 		int map_idx = bnge_cp_num_to_irq_num(bn, i);
2179 		char *attr;
2180 
2181 		if (bd->flags & BNGE_EN_SHARED_CHNL)
2182 			attr = "TxRx";
2183 		else if (i < bd->rx_nr_rings)
2184 			attr = "rx";
2185 		else
2186 			attr = "tx";
2187 
2188 		snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
2189 			 attr, i);
2190 		bd->irq_tbl[map_idx].handler = bnge_msix;
2191 	}
2192 }
2193 
/* Name the MSI-X vectors and publish the real TX/RX queue counts to
 * the net stack.  Returns the netif_set_real_num_queues() status.
 */
static int bnge_setup_interrupts(struct bnge_net *bn)
{
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;

	bnge_setup_msix(bn);

	return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
}
2203 
/* Free all FW-side resources: VNICs and filters, then rings, then
 * stat contexts.
 */
static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
{
	bnge_clear_vnic(bn);
	bnge_hwrm_ring_free(bn, close_path);
	bnge_hwrm_stat_ctx_free(bn);
}
2210 
/* Release every requested NQ IRQ, clearing its affinity hint and
 * cpumask first.  Safe after a partially failed bnge_request_irq()
 * because each entry's requested flag gates the teardown.
 */
static void bnge_free_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_irq *irq;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);

		irq = &bd->irq_tbl[map_idx];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			free_irq(irq->vector, bn->bnapi[i]);
		}

		irq->requested = 0;
	}
}
2233 
/* Request one IRQ per NQ, attach it to the NQ's NAPI, and spread IRQ
 * affinity across CPUs local to the device's NUMA node.  On any
 * failure all IRQs acquired so far are released.
 */
static int bnge_request_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, rc;

	rc = bnge_setup_interrupts(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
		return rc;
	}
	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);
		struct bnge_irq *irq = &bd->irq_tbl[map_idx];

		/* The per-NQ bnge_napi struct is the handler's cookie. */
		rc = request_irq(irq->vector, irq->handler, 0, irq->name,
				 bn->bnapi[i]);
		if (rc)
			goto err_free_irq;

		netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
		irq->requested = 1;

		/* A failed cpumask allocation is silently tolerated,
		 * but a failed affinity set aborts the whole setup.
		 */
		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bd->pdev->dev);

			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bn->netdev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				goto err_free_irq;
			}
		}
	}
	return 0;

err_free_irq:
	bnge_free_irq(bn);
	return rc;
}
2277 
/* Enable or disable TPA on every VNIC.  When disabling while FW access
 * is unavailable, skip the HWRM calls entirely.  Stops and returns the
 * error of the first VNIC that fails.
 */
static int bnge_set_tpa(struct bnge_net *bn, bool set_tpa)
{
	u32 tpa_flags = 0;
	int rc, i;

	if (set_tpa)
		tpa_flags = bn->priv_flags & BNGE_NET_EN_TPA;
	else if (BNGE_NO_FW_ACCESS(bn->bd))
		return 0;
	for (i = 0; i < bn->nr_vnics; i++) {
		rc = bnge_hwrm_vnic_set_tpa(bn->bd, &bn->vnic_info[i],
					    tpa_flags);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}
2298 
/* Bring up the data path in FW: stat contexts, all rings, the default
 * VNIC with RSS (and optionally TPA), the primary MAC filter, and the
 * RX mode mask.  On failure every FW resource acquired so far is
 * released.
 */
static int bnge_init_chip(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct bnge_dev *bd = bn->bd;
	int rc;

#define BNGE_DEF_STATS_COAL_TICKS	 1000000
	bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;

	rc = bnge_hwrm_stat_ctx_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_ring_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
	if (rc) {
		netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_setup_vnic(bn, vnic);
	if (rc)
		goto err_out;

	if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bnge_hwrm_update_rss_hash_cfg(bn);

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_set_tpa(bn, true);
		if (rc)
			goto err_out;
	}

	/* Filter for default vnic 0 */
	rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
	if (rc) {
		netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	/* Build the RX mask from the current netdev flags. */
	vnic->rx_mask = 0;

	if (bn->netdev->flags & IFF_BROADCAST)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if (bn->netdev->flags & IFF_PROMISC)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bn->netdev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else if (bn->netdev->flags & IFF_MULTICAST) {
		u32 mask = 0;

		bnge_mc_list_updated(bn, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnge_cfg_def_vnic(bn);
	if (rc)
		goto err_out;
	return 0;

err_out:
	bnge_hwrm_resource_free(bn, 0);
	return rc;
}
2374 
2375 static void bnge_init_napi(struct bnge_net *bn)
2376 {
2377 	struct bnge_dev *bd = bn->bd;
2378 	struct bnge_napi *bnapi;
2379 	int i;
2380 
2381 	for (i = 0; i < bd->nq_nr_rings; i++) {
2382 		bnapi = bn->bnapi[i];
2383 		netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
2384 					     bnge_napi_poll, bnapi->index);
2385 	}
2386 }
2387 
2388 static void bnge_del_napi(struct bnge_net *bn)
2389 {
2390 	struct bnge_dev *bd = bn->bd;
2391 	int i;
2392 
2393 	for (i = 0; i < bd->rx_nr_rings; i++)
2394 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
2395 	for (i = 0; i < bd->tx_nr_rings; i++)
2396 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
2397 
2398 	for (i = 0; i < bd->nq_nr_rings; i++) {
2399 		struct bnge_napi *bnapi = bn->bnapi[i];
2400 
2401 		__netif_napi_del_locked(&bnapi->napi);
2402 	}
2403 
2404 	/* Wait for RCU grace period after removing NAPI instances */
2405 	synchronize_net();
2406 }
2407 
/* Initialize ring state in host memory, allocate RX buffers and then
 * program the chip.
 *
 * NOTE(review): on bnge_init_chip() failure only the ring groups are
 * freed here; the RX ring pair buffers allocated above appear to be
 * reclaimed by the caller's error path (bnge_free_core()) — confirm.
 */
static int bnge_init_nic(struct bnge_net *bn)
{
	int rc;

	bnge_init_nq_tree(bn);

	bnge_init_rx_rings(bn);
	rc = bnge_alloc_rx_ring_pair_bufs(bn);
	if (rc)
		return rc;

	bnge_init_tx_rings(bn);

	rc = bnge_init_ring_grps(bn);
	if (rc)
		goto err_free_rx_ring_pair_bufs;

	bnge_init_vnics(bn);

	rc = bnge_init_chip(bn);
	if (rc)
		goto err_free_ring_grps;
	return rc;

err_free_ring_grps:
	bnge_free_ring_grps(bn);
	return rc;

err_free_rx_ring_pair_bufs:
	bnge_free_rx_ring_pair_bufs(bn);
	return rc;
}
2440 
2441 static void bnge_tx_disable(struct bnge_net *bn)
2442 {
2443 	struct bnge_tx_ring_info *txr;
2444 	int i;
2445 
2446 	if (bn->tx_ring) {
2447 		for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2448 			txr = &bn->tx_ring[i];
2449 			WRITE_ONCE(txr->dev_state, BNGE_DEV_STATE_CLOSING);
2450 		}
2451 	}
2452 	/* Make sure napi polls see @dev_state change */
2453 	synchronize_net();
2454 
2455 	if (!bn->netdev)
2456 		return;
2457 	/* Drop carrier first to prevent TX timeout */
2458 	netif_carrier_off(bn->netdev);
2459 	/* Stop all TX queues */
2460 	netif_tx_disable(bn->netdev);
2461 }
2462 
2463 static void bnge_tx_enable(struct bnge_net *bn)
2464 {
2465 	struct bnge_tx_ring_info *txr;
2466 	int i;
2467 
2468 	for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2469 		txr = &bn->tx_ring[i];
2470 		WRITE_ONCE(txr->dev_state, 0);
2471 	}
2472 	/* Make sure napi polls see @dev_state change */
2473 	synchronize_net();
2474 	netif_tx_wake_all_queues(bn->netdev);
2475 }
2476 
/* Bring-up path backing ndo_open: reserve and allocate host resources,
 * attach NAPI and IRQs, program the chip, then enable the datapath.
 * Error labels unwind in reverse order of setup.
 */
static int bnge_open_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int rc;

	netif_carrier_off(bn->netdev);

	rc = bnge_reserve_rings(bd);
	if (rc) {
		netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
		return rc;
	}

	rc = bnge_alloc_core(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
		return rc;
	}

	bnge_init_napi(bn);
	rc = bnge_request_irq(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
		goto err_del_napi;
	}

	rc = bnge_init_nic(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
		goto err_free_irq;
	}

	/* Datapath enable: NAPI first, mark the device open, unmask
	 * interrupts, and only then wake the TX queues.
	 */
	bnge_enable_napi(bn);

	set_bit(BNGE_STATE_OPEN, &bd->state);

	bnge_enable_int(bn);

	bnge_tx_enable(bn);
	return 0;

err_free_irq:
	bnge_free_irq(bn);
err_del_napi:
	bnge_del_napi(bn);
	bnge_free_core(bn);
	return rc;
}
2525 
/* ndo_open handler: delegate to bnge_open_core() and log failures. */
static int bnge_open(struct net_device *dev)
{
	struct bnge_net *bn = netdev_priv(dev);
	int rc = bnge_open_core(bn);

	if (rc)
		netdev_err(dev, "bnge_open_core err: %d\n", rc);
	return rc;
}
2537 
/* Release all FW-side resources during close; always succeeds. */
static int bnge_shutdown_nic(struct bnge_net *bn)
{
	bnge_hwrm_resource_free(bn, 1);

	return 0;
}
2543 
/* Teardown in reverse order of bnge_open_core(): stop TX first so no
 * new work arrives, mark the device closed, free FW resources, then
 * NAPI, ring buffers, IRQs, and finally all core memory.
 */
static void bnge_close_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;

	bnge_tx_disable(bn);

	clear_bit(BNGE_STATE_OPEN, &bd->state);
	bnge_shutdown_nic(bn);
	bnge_disable_napi(bn);
	bnge_free_all_rings_bufs(bn);
	bnge_free_irq(bn);
	bnge_del_napi(bn);

	bnge_free_core(bn);
}
2559 
/* ndo_stop handler: full datapath teardown. */
static int bnge_close(struct net_device *dev)
{
	bnge_close_core(netdev_priv(dev));

	return 0;
}
2568 
/* netdev ops: open/close are defined above; the xmit and
 * features_check datapath entry points come from bnge_txrx.h.
 */
static const struct net_device_ops bnge_netdev_ops = {
	.ndo_open		= bnge_open,
	.ndo_stop		= bnge_close,
	.ndo_start_xmit		= bnge_start_xmit,
	.ndo_features_check	= bnge_features_check,
};
2575 
2576 static void bnge_init_mac_addr(struct bnge_dev *bd)
2577 {
2578 	eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
2579 }
2580 
2581 static void bnge_set_tpa_flags(struct bnge_dev *bd)
2582 {
2583 	struct bnge_net *bn = netdev_priv(bd->netdev);
2584 
2585 	bn->priv_flags &= ~BNGE_NET_EN_TPA;
2586 
2587 	if (bd->netdev->features & NETIF_F_LRO)
2588 		bn->priv_flags |= BNGE_NET_EN_LRO;
2589 	else if (bd->netdev->features & NETIF_F_GRO_HW)
2590 		bn->priv_flags |= BNGE_NET_EN_GRO;
2591 }
2592 
2593 static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
2594 {
2595 	int i;
2596 
2597 	for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
2598 		INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
2599 	get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
2600 }
2601 
/* Derive RX/aggregation/TX/completion ring geometry (buffer sizes,
 * page counts and index masks) from the current MTU, requested ring
 * sizes and TPA/jumbo settings.
 */
void bnge_set_ring_params(struct bnge_dev *bd)
{
	struct bnge_net *bn = netdev_priv(bd->netdev);
	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ring_size = bn->rx_ring_size;
	bn->rx_agg_ring_size = 0;
	bn->rx_agg_nr_pages = 0;

	/* TPA: up to 4 agg buffers per packet, bounded by 64K / page size */
	if (bn->priv_flags & BNGE_NET_EN_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);

	bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
	if (rx_space > PAGE_SIZE) {
		/* MTU doesn't fit in one page: aggregation rings required */
		u32 jumbo_factor;

		bn->priv_flags |= BNGE_NET_EN_JUMBO;
		jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	if (agg_factor) {
		if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
			ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
			netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
				    bn->rx_ring_size, ring_size);
			bn->rx_ring_size = ring_size;
		}
		agg_ring_size = ring_size * agg_factor;

		bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
							  RX_DESC_CNT);
		if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bn->rx_agg_ring_size = agg_ring_size;
		bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;

		/* With agg rings, size the primary RX buffer from the
		 * copybreak values and the pending HDS threshold instead
		 * of the full MTU.
		 */
		rx_size = max3(BNGE_DEFAULT_RX_COPYBREAK,
			       bn->rx_copybreak,
			       bn->netdev->cfg_pending->hds_thresh);
		rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bn->rx_buf_use_size = rx_size;
	bn->rx_buf_size = rx_space;

	bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
	bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bn->tx_ring_size;
	bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
	bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;

	/* Completion ring sized as 2x RX completions (plus TPA headroom)
	 * plus agg and TX events.
	 */
	max_rx_cmpl = bn->rx_ring_size;

	if (bn->priv_flags & BNGE_NET_EN_TPA)
		max_rx_cmpl += bd->max_tpa_v2;
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
	bn->cp_ring_size = ring_size;

	bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
	if (bn->cp_nr_pages > MAX_CP_PAGES) {
		bn->cp_nr_pages = MAX_CP_PAGES;
		bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
			    ring_size, bn->cp_ring_size);
	}
	bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
	bn->cp_ring_mask = bn->cp_bit - 1;
}
2687 
2688 static void bnge_init_ring_params(struct bnge_net *bn)
2689 {
2690 	u32 rx_size;
2691 
2692 	bn->rx_copybreak = BNGE_DEFAULT_RX_COPYBREAK;
2693 	/* Try to fit 4 chunks into a 4k page */
2694 	rx_size = SZ_1K -
2695 		NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2696 	bn->netdev->cfg->hds_thresh = max(BNGE_DEFAULT_RX_COPYBREAK, rx_size);
2697 }
2698 
/* Allocate and register the netdev for this device.
 *
 * Sizes the netdev for the maximum possible queue count, wires up the
 * ops, advertises offload features based on FW/device capabilities,
 * seeds default ring parameters and the MAC address, and registers it.
 *
 * Return: 0 on success, negative errno on failure (netdev is freed).
 */
int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
{
	struct net_device *netdev;
	struct bnge_net *bn;
	int rc;

	netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
				    max_irqs);
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, bd->dev);
	bd->netdev = netdev;

	netdev->netdev_ops = &bnge_netdev_ops;

	bnge_set_ethtool_ops(netdev);

	bn = netdev_priv(netdev);
	bn->netdev = netdev;
	bn->bd = bd;

	netdev->min_mtu = ETH_ZLEN;
	netdev->max_mtu = bd->max_mtu;

	netdev->hw_features = NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GSO_UDP_TUNNEL |
			      NETIF_F_GSO_GRE |
			      NETIF_F_GSO_IPXIP4 |
			      NETIF_F_GSO_UDP_TUNNEL_CSUM |
			      NETIF_F_GSO_GRE_CSUM |
			      NETIF_F_GSO_PARTIAL |
			      NETIF_F_RXHASH |
			      NETIF_F_RXCSUM |
			      NETIF_F_GRO;

	/* UDP segmentation offload requires explicit FW support */
	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
		netdev->hw_features |= NETIF_F_GSO_UDP_L4;

	if (BNGE_SUPPORTS_TPA(bd))
		netdev->hw_features |= NETIF_F_LRO;

	/* Offloads available for encapsulated (tunneled) traffic */
	netdev->hw_enc_features = NETIF_F_IP_CSUM |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_GSO_GRE |
				  NETIF_F_GSO_UDP_TUNNEL_CSUM |
				  NETIF_F_GSO_GRE_CSUM |
				  NETIF_F_GSO_IPXIP4 |
				  NETIF_F_GSO_PARTIAL;

	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;

	netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				       NETIF_F_GSO_GRE_CSUM;

	netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
	if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
	if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;

	if (BNGE_SUPPORTS_TPA(bd))
		netdev->hw_features |= NETIF_F_GRO_HW;

	netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;

	/* HW GRO and LRO are mutually exclusive; prefer HW GRO */
	if (netdev->features & NETIF_F_GRO_HW)
		netdev->features &= ~NETIF_F_LRO;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
	if (bd->tso_max_segs)
		netif_set_tso_max_segs(netdev, bd->tso_max_segs);

	bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
	bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
	bn->rx_dir = DMA_FROM_DEVICE;

	bnge_set_tpa_flags(bd);
	bnge_init_ring_params(bn);
	bnge_set_ring_params(bd);

	bnge_init_l2_fltr_tbl(bn);
	bnge_init_mac_addr(bd);

	/* Ask the core to hold the netdev instance lock across ops */
	netdev->request_ops_lock = true;
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
		goto err_netdev;
	}

	return 0;

err_netdev:
	free_netdev(netdev);
	return rc;
}
2807 
2808 void bnge_netdev_free(struct bnge_dev *bd)
2809 {
2810 	struct net_device *netdev = bd->netdev;
2811 
2812 	unregister_netdev(netdev);
2813 	free_netdev(netdev);
2814 	bd->netdev = NULL;
2815 }
2816