xref: /linux/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2025 Broadcom.
3 
4 #include <asm/byteorder.h>
5 #include <linux/dma-mapping.h>
6 #include <linux/dmapool.h>
7 #include <linux/delay.h>
8 #include <linux/errno.h>
9 #include <linux/kernel.h>
10 #include <linux/list.h>
11 #include <linux/pci.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/if.h>
15 #include <net/ip.h>
16 #include <linux/skbuff.h>
17 #include <net/page_pool/helpers.h>
18 
19 #include "bnge.h"
20 #include "bnge_hwrm_lib.h"
21 #include "bnge_ethtool.h"
22 #include "bnge_rmem.h"
23 
24 #define BNGE_RING_TO_TC_OFF(bd, tx)	\
25 	((tx) % (bd)->tx_nr_rings_per_tc)
26 
27 #define BNGE_RING_TO_TC(bd, tx)		\
28 	((tx) / (bd)->tx_nr_rings_per_tc)
29 
30 #define BNGE_TC_TO_RING_BASE(bd, tc)	\
31 	((tc) * (bd)->tx_nr_rings_per_tc)
32 
33 static void bnge_free_stats_mem(struct bnge_net *bn,
34 				struct bnge_stats_mem *stats)
35 {
36 	struct bnge_dev *bd = bn->bd;
37 
38 	if (stats->hw_stats) {
39 		dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
40 				  stats->hw_stats_map);
41 		stats->hw_stats = NULL;
42 	}
43 }
44 
45 static int bnge_alloc_stats_mem(struct bnge_net *bn,
46 				struct bnge_stats_mem *stats)
47 {
48 	struct bnge_dev *bd = bn->bd;
49 
50 	stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
51 					     &stats->hw_stats_map, GFP_KERNEL);
52 	if (!stats->hw_stats)
53 		return -ENOMEM;
54 
55 	return 0;
56 }
57 
58 static void bnge_free_ring_stats(struct bnge_net *bn)
59 {
60 	struct bnge_dev *bd = bn->bd;
61 	int i;
62 
63 	if (!bn->bnapi)
64 		return;
65 
66 	for (i = 0; i < bd->nq_nr_rings; i++) {
67 		struct bnge_napi *bnapi = bn->bnapi[i];
68 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
69 
70 		bnge_free_stats_mem(bn, &nqr->stats);
71 	}
72 }
73 
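/* Allocate a DMA-coherent hardware stats block for every NQ ring.
 * The stats context ID is left invalid here; it is assigned when the
 * context is later registered with the firmware.
 */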
74 static int bnge_alloc_ring_stats(struct bnge_net *bn)
75 {
76 	struct bnge_dev *bd = bn->bd;
77 	u32 size, i;
78 	int rc;
79 
80 	size = bd->hw_ring_stats_size;
81 
82 	for (i = 0; i < bd->nq_nr_rings; i++) {
83 		struct bnge_napi *bnapi = bn->bnapi[i];
84 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
85 
86 		nqr->stats.len = size;
87 		rc = bnge_alloc_stats_mem(bn, &nqr->stats);
88 		if (rc)
89 			goto err_free_ring_stats;
90 
91 		nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
92 	}
93 	return 0;
94 
95 err_free_ring_stats:
96 	bnge_free_ring_stats(bn);
97 	return rc;
98 }
99 
100 static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
101 {
102 	struct bnge_ring_struct *ring = &nqr->ring_struct;
103 
104 	kfree(nqr->desc_ring);
105 	nqr->desc_ring = NULL;
106 	ring->ring_mem.pg_arr = NULL;
107 	kfree(nqr->desc_mapping);
108 	nqr->desc_mapping = NULL;
109 	ring->ring_mem.dma_arr = NULL;
110 }
111 
112 static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
113 {
114 	struct bnge_ring_struct *ring = &cpr->ring_struct;
115 
116 	kfree(cpr->desc_ring);
117 	cpr->desc_ring = NULL;
118 	ring->ring_mem.pg_arr = NULL;
119 	kfree(cpr->desc_mapping);
120 	cpr->desc_mapping = NULL;
121 	ring->ring_mem.dma_arr = NULL;
122 }
123 
124 static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
125 {
126 	nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
127 	if (!nqr->desc_ring)
128 		return -ENOMEM;
129 
130 	nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
131 	if (!nqr->desc_mapping)
132 		goto err_free_desc_ring;
133 	return 0;
134 
135 err_free_desc_ring:
136 	kfree(nqr->desc_ring);
137 	nqr->desc_ring = NULL;
138 	return -ENOMEM;
139 }
140 
141 static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
142 {
143 	cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
144 	if (!cpr->desc_ring)
145 		return -ENOMEM;
146 
147 	cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
148 	if (!cpr->desc_mapping)
149 		goto err_free_desc_ring;
150 	return 0;
151 
152 err_free_desc_ring:
153 	kfree(cpr->desc_ring);
154 	cpr->desc_ring = NULL;
155 	return -ENOMEM;
156 }
157 
158 static void bnge_free_nq_arrays(struct bnge_net *bn)
159 {
160 	struct bnge_dev *bd = bn->bd;
161 	int i;
162 
163 	for (i = 0; i < bd->nq_nr_rings; i++) {
164 		struct bnge_napi *bnapi = bn->bnapi[i];
165 
166 		bnge_free_nq_desc_arr(&bnapi->nq_ring);
167 	}
168 }
169 
170 static int bnge_alloc_nq_arrays(struct bnge_net *bn)
171 {
172 	struct bnge_dev *bd = bn->bd;
173 	int i, rc;
174 
175 	for (i = 0; i < bd->nq_nr_rings; i++) {
176 		struct bnge_napi *bnapi = bn->bnapi[i];
177 
178 		rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
179 		if (rc)
180 			goto err_free_nq_arrays;
181 	}
182 	return 0;
183 
184 err_free_nq_arrays:
185 	bnge_free_nq_arrays(bn);
186 	return rc;
187 }
188 
189 static void bnge_free_nq_tree(struct bnge_net *bn)
190 {
191 	struct bnge_dev *bd = bn->bd;
192 	int i;
193 
194 	for (i = 0; i < bd->nq_nr_rings; i++) {
195 		struct bnge_napi *bnapi = bn->bnapi[i];
196 		struct bnge_nq_ring_info *nqr;
197 		struct bnge_ring_struct *ring;
198 		int j;
199 
200 		nqr = &bnapi->nq_ring;
201 		ring = &nqr->ring_struct;
202 
203 		bnge_free_ring(bd, &ring->ring_mem);
204 
205 		if (!nqr->cp_ring_arr)
206 			continue;
207 
208 		for (j = 0; j < nqr->cp_ring_count; j++) {
209 			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
210 
211 			ring = &cpr->ring_struct;
212 			bnge_free_ring(bd, &ring->ring_mem);
213 			bnge_free_cp_desc_arr(cpr);
214 		}
215 		kfree(nqr->cp_ring_arr);
216 		nqr->cp_ring_arr = NULL;
217 		nqr->cp_ring_count = 0;
218 	}
219 }
220 
221 static int alloc_one_cp_ring(struct bnge_net *bn,
222 			     struct bnge_cp_ring_info *cpr)
223 {
224 	struct bnge_ring_mem_info *rmem;
225 	struct bnge_ring_struct *ring;
226 	struct bnge_dev *bd = bn->bd;
227 	int rc;
228 
229 	rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
230 	if (rc)
231 		return -ENOMEM;
232 	ring = &cpr->ring_struct;
233 	rmem = &ring->ring_mem;
234 	rmem->nr_pages = bn->cp_nr_pages;
235 	rmem->page_size = HW_CMPD_RING_SIZE;
236 	rmem->pg_arr = (void **)cpr->desc_ring;
237 	rmem->dma_arr = cpr->desc_mapping;
238 	rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
239 	rc = bnge_alloc_ring(bd, rmem);
240 	if (rc)
241 		goto err_free_cp_desc_arr;
242 	return rc;
243 
244 err_free_cp_desc_arr:
245 	bnge_free_cp_desc_arr(cpr);
246 	return rc;
247 }
248 
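/* Build the NQ hierarchy. Each NAPI instance owns one notification
 * queue; beneath it sit the completion rings it services: one RX
 * completion ring if the index maps to an RX ring, plus one completion
 * ring per traffic class for TX. The NQ's MSI-X map index is offset by
 * the vectors reserved for the auxiliary device.
 */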
249 static int bnge_alloc_nq_tree(struct bnge_net *bn)
250 {
251 	struct bnge_dev *bd = bn->bd;
252 	int i, j, ulp_msix, rc;
253 	int tcs = 1;
254 
255 	ulp_msix = bnge_aux_get_msix(bd);
256 	for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
257 		bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
258 		struct bnge_napi *bnapi = bn->bnapi[i];
259 		struct bnge_nq_ring_info *nqr;
260 		struct bnge_cp_ring_info *cpr;
261 		struct bnge_ring_struct *ring;
262 		int cp_count = 0, k;
263 		int rx = 0, tx = 0;
264 
265 		nqr = &bnapi->nq_ring;
266 		nqr->bnapi = bnapi;
267 		ring = &nqr->ring_struct;
268 
269 		rc = bnge_alloc_ring(bd, &ring->ring_mem);
270 		if (rc)
271 			goto err_free_nq_tree;
272 
273 		ring->map_idx = ulp_msix + i;
274 
275 		if (i < bd->rx_nr_rings) {
276 			cp_count++;
277 			rx = 1;
278 		}
279 
280 		if ((sh && i < bd->tx_nr_rings) ||
281 		    (!sh && i >= bd->rx_nr_rings)) {
282 			cp_count += tcs;
283 			tx = 1;
284 		}
285 
286 		nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
287 					   GFP_KERNEL);
288 		if (!nqr->cp_ring_arr) {
289 			rc = -ENOMEM;
290 			goto err_free_nq_tree;
291 		}
292 
293 		nqr->cp_ring_count = cp_count;
294 
295 		for (k = 0; k < cp_count; k++) {
296 			cpr = &nqr->cp_ring_arr[k];
297 			rc = alloc_one_cp_ring(bn, cpr);
298 			if (rc)
299 				goto err_free_nq_tree;
300 
301 			cpr->bnapi = bnapi;
302 			cpr->cp_idx = k;
303 			if (!k && rx) {
304 				bn->rx_ring[i].rx_cpr = cpr;
305 				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
306 			} else {
307 				int n, tc = k - rx;
308 
309 				n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
310 				bn->tx_ring[n].tx_cpr = cpr;
311 				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
312 			}
313 		}
314 		if (tx)
315 			j++;
316 	}
317 	return 0;
318 
319 err_free_nq_tree:
320 	bnge_free_nq_tree(bn);
321 	return rc;
322 }
323 
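/* A separate page pool for packet headers is required when the main
 * pool hands out unreadable netmem, or when the system page size
 * exceeds BNGE_RX_PAGE_SIZE.
 */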
324 static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
325 {
326 	return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
327 }
328 
329 static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
330 				       struct bnge_rx_ring_info *rxr)
331 {
332 	int i, max_idx;
333 
334 	if (!rxr->rx_buf_ring)
335 		return;
336 
337 	max_idx = bn->rx_nr_pages * RX_DESC_CNT;
338 
339 	for (i = 0; i < max_idx; i++) {
340 		struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
341 		void *data = rx_buf->data;
342 
343 		if (!data)
344 			continue;
345 
346 		rx_buf->data = NULL;
347 		page_pool_free_va(rxr->head_pool, data, true);
348 	}
349 }
350 
351 static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
352 					struct bnge_rx_ring_info *rxr)
353 {
354 	int i, max_idx;
355 
356 	if (!rxr->rx_agg_buf_ring)
357 		return;
358 
359 	max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
360 
361 	for (i = 0; i < max_idx; i++) {
362 		struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
363 		netmem_ref netmem = rx_agg_buf->netmem;
364 
365 		if (!netmem)
366 			continue;
367 
368 		rx_agg_buf->netmem = 0;
369 		__clear_bit(i, rxr->rx_agg_bmap);
370 
371 		page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
372 	}
373 }
374 
375 static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
376 					    struct bnge_rx_ring_info *rxr)
377 {
378 	bnge_free_one_rx_ring_bufs(bn, rxr);
379 	bnge_free_one_agg_ring_bufs(bn, rxr);
380 }
381 
382 static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
383 {
384 	struct bnge_dev *bd = bn->bd;
385 	int i;
386 
387 	if (!bn->rx_ring)
388 		return;
389 
390 	for (i = 0; i < bd->rx_nr_rings; i++)
391 		bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
392 }
393 
394 static void bnge_free_all_rings_bufs(struct bnge_net *bn)
395 {
396 	bnge_free_rx_ring_pair_bufs(bn);
397 }
398 
399 static void bnge_free_rx_rings(struct bnge_net *bn)
400 {
401 	struct bnge_dev *bd = bn->bd;
402 	int i;
403 
404 	for (i = 0; i < bd->rx_nr_rings; i++) {
405 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
406 		struct bnge_ring_struct *ring;
407 
408 		page_pool_destroy(rxr->page_pool);
409 		page_pool_destroy(rxr->head_pool);
410 		rxr->page_pool = rxr->head_pool = NULL;
411 
412 		kfree(rxr->rx_agg_bmap);
413 		rxr->rx_agg_bmap = NULL;
414 
415 		ring = &rxr->rx_ring_struct;
416 		bnge_free_ring(bd, &ring->ring_mem);
417 
418 		ring = &rxr->rx_agg_ring_struct;
419 		bnge_free_ring(bd, &ring->ring_mem);
420 	}
421 }
422 
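/* Create the per-ring page pool used for RX aggregation buffers and,
 * when needed, a smaller second pool for packet headers. If no
 * separate head pool is required, the same pool is reused with an
 * extra reference so that both pointers can be destroyed symmetrically.
 */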
423 static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
424 				   struct bnge_rx_ring_info *rxr,
425 				   int numa_node)
426 {
427 	const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
428 	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
429 	struct page_pool_params pp = { 0 };
430 	struct bnge_dev *bd = bn->bd;
431 	struct page_pool *pool;
432 
433 	pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
434 	pp.nid = numa_node;
435 	pp.netdev = bn->netdev;
436 	pp.dev = bd->dev;
437 	pp.dma_dir = bn->rx_dir;
438 	pp.max_len = PAGE_SIZE;
439 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
440 		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
441 	pp.queue_idx = rxr->bnapi->index;
442 
443 	pool = page_pool_create(&pp);
444 	if (IS_ERR(pool))
445 		return PTR_ERR(pool);
446 	rxr->page_pool = pool;
447 
448 	rxr->need_head_pool = page_pool_is_unreadable(pool);
449 	if (bnge_separate_head_pool(rxr)) {
450 		pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
451 		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
452 		pool = page_pool_create(&pp);
453 		if (IS_ERR(pool))
454 			goto err_destroy_pp;
455 	} else {
456 		page_pool_get(pool);
457 	}
458 	rxr->head_pool = pool;
459 	return 0;
460 
461 err_destroy_pp:
462 	page_pool_destroy(rxr->page_pool);
463 	rxr->page_pool = NULL;
464 	return PTR_ERR(pool);
465 }
466 
467 static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
468 {
469 	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
470 	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
471 }
472 
473 static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
474 				  struct bnge_rx_ring_info *rxr)
475 {
476 	u16 mem_size;
477 
478 	rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
479 	mem_size = rxr->rx_agg_bmap_size / 8;
480 	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
481 	if (!rxr->rx_agg_bmap)
482 		return -ENOMEM;
483 
484 	return 0;
485 }
486 
487 static int bnge_alloc_rx_rings(struct bnge_net *bn)
488 {
489 	int i, rc = 0, agg_rings = 0, cpu;
490 	struct bnge_dev *bd = bn->bd;
491 
492 	if (bnge_is_agg_reqd(bd))
493 		agg_rings = 1;
494 
495 	for (i = 0; i < bd->rx_nr_rings; i++) {
496 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
497 		struct bnge_ring_struct *ring;
498 		int cpu_node;
499 
500 		ring = &rxr->rx_ring_struct;
501 
502 		cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
503 		cpu_node = cpu_to_node(cpu);
504 		netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
505 			   i, cpu_node);
506 		rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
507 		if (rc)
508 			goto err_free_rx_rings;
509 		bnge_enable_rx_page_pool(rxr);
510 
511 		rc = bnge_alloc_ring(bd, &ring->ring_mem);
512 		if (rc)
513 			goto err_free_rx_rings;
514 
515 		ring->grp_idx = i;
516 		if (agg_rings) {
517 			ring = &rxr->rx_agg_ring_struct;
518 			rc = bnge_alloc_ring(bd, &ring->ring_mem);
519 			if (rc)
520 				goto err_free_rx_rings;
521 
522 			ring->grp_idx = i;
523 			rc = bnge_alloc_rx_agg_bmap(bn, rxr);
524 			if (rc)
525 				goto err_free_rx_rings;
526 		}
527 	}
528 	return rc;
529 
530 err_free_rx_rings:
531 	bnge_free_rx_rings(bn);
532 	return rc;
533 }
534 
535 static void bnge_free_tx_rings(struct bnge_net *bn)
536 {
537 	struct bnge_dev *bd = bn->bd;
538 	int i;
539 
540 	for (i = 0; i < bd->tx_nr_rings; i++) {
541 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
542 		struct bnge_ring_struct *ring;
543 
544 		ring = &txr->tx_ring_struct;
545 
546 		bnge_free_ring(bd, &ring->ring_mem);
547 	}
548 }
549 
550 static int bnge_alloc_tx_rings(struct bnge_net *bn)
551 {
552 	struct bnge_dev *bd = bn->bd;
553 	int i, j, rc;
554 
555 	for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
556 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
557 		struct bnge_ring_struct *ring;
558 		u8 qidx;
559 
560 		ring = &txr->tx_ring_struct;
561 
562 		rc = bnge_alloc_ring(bd, &ring->ring_mem);
563 		if (rc)
564 			goto err_free_tx_rings;
565 
566 		ring->grp_idx = txr->bnapi->index;
567 		qidx = bd->tc_to_qidx[j];
568 		ring->queue_id = bd->q_info[qidx].queue_id;
569 		if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
570 			j++;
571 	}
572 	return 0;
573 
574 err_free_tx_rings:
575 	bnge_free_tx_rings(bn);
576 	return rc;
577 }
578 
579 static void bnge_free_vnic_attributes(struct bnge_net *bn)
580 {
581 	struct pci_dev *pdev = bn->bd->pdev;
582 	struct bnge_vnic_info *vnic;
583 	int i;
584 
585 	if (!bn->vnic_info)
586 		return;
587 
588 	for (i = 0; i < bn->nr_vnics; i++) {
589 		vnic = &bn->vnic_info[i];
590 
591 		kfree(vnic->uc_list);
592 		vnic->uc_list = NULL;
593 
594 		if (vnic->mc_list) {
595 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
596 					  vnic->mc_list, vnic->mc_list_mapping);
597 			vnic->mc_list = NULL;
598 		}
599 
600 		if (vnic->rss_table) {
601 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
602 					  vnic->rss_table,
603 					  vnic->rss_table_dma_addr);
604 			vnic->rss_table = NULL;
605 		}
606 
607 		vnic->rss_hash_key = NULL;
608 		vnic->flags = 0;
609 	}
610 }
611 
612 static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
613 {
614 	struct bnge_dev *bd = bn->bd;
615 	struct bnge_vnic_info *vnic;
616 	int i, size;
617 
618 	for (i = 0; i < bn->nr_vnics; i++) {
619 		vnic = &bn->vnic_info[i];
620 
621 		if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
622 			int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;
623 
624 			vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
625 			if (!vnic->uc_list)
626 				goto err_free_vnic_attributes;
627 		}
628 
629 		if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
630 			vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
631 			vnic->mc_list =
632 				dma_alloc_coherent(bd->dev,
633 						   vnic->mc_list_size,
634 						   &vnic->mc_list_mapping,
635 						   GFP_KERNEL);
636 			if (!vnic->mc_list)
637 				goto err_free_vnic_attributes;
638 		}
639 
640 		/* Allocate rss table and hash key */
641 		size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);
642 
643 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
644 		vnic->rss_table = dma_alloc_coherent(bd->dev,
645 						     vnic->rss_table_size,
646 						     &vnic->rss_table_dma_addr,
647 						     GFP_KERNEL);
648 		if (!vnic->rss_table)
649 			goto err_free_vnic_attributes;
650 
651 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
652 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
653 	}
654 	return 0;
655 
656 err_free_vnic_attributes:
657 	bnge_free_vnic_attributes(bn);
658 	return -ENOMEM;
659 }
660 
661 static int bnge_alloc_vnics(struct bnge_net *bn)
662 {
663 	int num_vnics;
664 
665 	/* Allocate only one VNIC for now. Additional VNICs will be added
666 	 * when RFS/ntuple filter support is introduced in future patches.
667 	 */
668 	num_vnics = 1;
669 
670 	bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
671 				GFP_KERNEL);
672 	if (!bn->vnic_info)
673 		return -ENOMEM;
674 
675 	bn->nr_vnics = num_vnics;
676 
677 	return 0;
678 }
679 
680 static void bnge_free_vnics(struct bnge_net *bn)
681 {
682 	kfree(bn->vnic_info);
683 	bn->vnic_info = NULL;
684 	bn->nr_vnics = 0;
685 }
686 
687 static void bnge_free_ring_grps(struct bnge_net *bn)
688 {
689 	kfree(bn->grp_info);
690 	bn->grp_info = NULL;
691 }
692 
693 static int bnge_init_ring_grps(struct bnge_net *bn)
694 {
695 	struct bnge_dev *bd = bn->bd;
696 	int i;
697 
698 	bn->grp_info = kcalloc(bd->nq_nr_rings,
699 			       sizeof(struct bnge_ring_grp_info),
700 			       GFP_KERNEL);
701 	if (!bn->grp_info)
702 		return -ENOMEM;
703 	for (i = 0; i < bd->nq_nr_rings; i++) {
704 		bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
705 		bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
706 		bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
707 		bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
708 		bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
709 	}
710 
711 	return 0;
712 }
713 
714 static void bnge_free_core(struct bnge_net *bn)
715 {
716 	bnge_free_vnic_attributes(bn);
717 	bnge_free_tx_rings(bn);
718 	bnge_free_rx_rings(bn);
719 	bnge_free_nq_tree(bn);
720 	bnge_free_nq_arrays(bn);
721 	bnge_free_ring_stats(bn);
722 	bnge_free_ring_grps(bn);
723 	bnge_free_vnics(bn);
724 	kfree(bn->tx_ring_map);
725 	bn->tx_ring_map = NULL;
726 	kfree(bn->tx_ring);
727 	bn->tx_ring = NULL;
728 	kfree(bn->rx_ring);
729 	bn->rx_ring = NULL;
730 	kfree(bn->bnapi);
731 	bn->bnapi = NULL;
732 }
733 
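/* Allocate all software state for the data path in dependency order:
 * the bnapi array (one instance per NQ), RX/TX ring info arrays and the
 * TX ring map, per-ring stats, VNIC info, NQ descriptor arrays, ring
 * memory for the RX/TX/NQ/completion rings, and finally the default
 * VNIC's attribute buffers. Any failure unwinds through bnge_free_core().
 */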
734 static int bnge_alloc_core(struct bnge_net *bn)
735 {
736 	struct bnge_dev *bd = bn->bd;
737 	int i, j, size, arr_size;
738 	int rc = -ENOMEM;
739 	void *bnapi;
740 
741 	arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
742 			bd->nq_nr_rings);
743 	size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
744 	bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
745 	if (!bnapi)
746 		return rc;
747 
748 	bn->bnapi = bnapi;
749 	bnapi += arr_size;
750 	for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
751 		struct bnge_nq_ring_info *nqr;
752 
753 		bn->bnapi[i] = bnapi;
754 		bn->bnapi[i]->index = i;
755 		bn->bnapi[i]->bn = bn;
756 		nqr = &bn->bnapi[i]->nq_ring;
757 		nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
758 	}
759 
760 	bn->rx_ring = kcalloc(bd->rx_nr_rings,
761 			      sizeof(struct bnge_rx_ring_info),
762 			      GFP_KERNEL);
763 	if (!bn->rx_ring)
764 		goto err_free_core;
765 
766 	for (i = 0; i < bd->rx_nr_rings; i++) {
767 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
768 
769 		rxr->rx_ring_struct.ring_mem.flags =
770 			BNGE_RMEM_RING_PTE_FLAG;
771 		rxr->rx_agg_ring_struct.ring_mem.flags =
772 			BNGE_RMEM_RING_PTE_FLAG;
773 		rxr->bnapi = bn->bnapi[i];
774 		bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
775 	}
776 
777 	bn->tx_ring = kcalloc(bd->tx_nr_rings,
778 			      sizeof(struct bnge_tx_ring_info),
779 			      GFP_KERNEL);
780 	if (!bn->tx_ring)
781 		goto err_free_core;
782 
783 	bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
784 				  GFP_KERNEL);
785 	if (!bn->tx_ring_map)
786 		goto err_free_core;
787 
788 	if (bd->flags & BNGE_EN_SHARED_CHNL)
789 		j = 0;
790 	else
791 		j = bd->rx_nr_rings;
792 
793 	for (i = 0; i < bd->tx_nr_rings; i++) {
794 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
795 		struct bnge_napi *bnapi2;
796 		int k;
797 
798 		txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
799 		bn->tx_ring_map[i] = i;
800 		k = j + BNGE_RING_TO_TC_OFF(bd, i);
801 
802 		bnapi2 = bn->bnapi[k];
803 		txr->txq_index = i;
804 		txr->tx_napi_idx =
805 			BNGE_RING_TO_TC(bd, txr->txq_index);
806 		bnapi2->tx_ring[txr->tx_napi_idx] = txr;
807 		txr->bnapi = bnapi2;
808 	}
809 
810 	rc = bnge_alloc_ring_stats(bn);
811 	if (rc)
812 		goto err_free_core;
813 
814 	rc = bnge_alloc_vnics(bn);
815 	if (rc)
816 		goto err_free_core;
817 
818 	rc = bnge_alloc_nq_arrays(bn);
819 	if (rc)
820 		goto err_free_core;
821 
822 	bnge_init_ring_struct(bn);
823 
824 	rc = bnge_alloc_rx_rings(bn);
825 	if (rc)
826 		goto err_free_core;
827 
828 	rc = bnge_alloc_tx_rings(bn);
829 	if (rc)
830 		goto err_free_core;
831 
832 	rc = bnge_alloc_nq_tree(bn);
833 	if (rc)
834 		goto err_free_core;
835 
836 	bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
837 						  BNGE_VNIC_MCAST_FLAG |
838 						  BNGE_VNIC_UCAST_FLAG;
839 	rc = bnge_alloc_vnic_attributes(bn);
840 	if (rc)
841 		goto err_free_core;
842 	return 0;
843 
844 err_free_core:
845 	bnge_free_core(bn);
846 	return rc;
847 }
848 
849 u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
850 {
851 	return rxr->rx_cpr->ring_struct.fw_ring_id;
852 }
853 
854 u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
855 {
856 	return txr->tx_cpr->ring_struct.fw_ring_id;
857 }
858 
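/* Doorbell helpers: write the 64-bit doorbell with the current ring
 * index to re-arm the NQ or to arm the completion ring (CQ_ARMALL).
 */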
859 static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
860 {
861 	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
862 		    DB_RING_IDX(db, idx), db->doorbell);
863 }
864 
865 static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
866 {
867 	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
868 		    DB_RING_IDX(db, idx), db->doorbell);
869 }
870 
871 static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
872 {
873 	struct bnge_napi *bnapi = bn->bnapi[n];
874 	struct bnge_nq_ring_info *nqr;
875 
876 	nqr = &bnapi->nq_ring;
877 
878 	return nqr->ring_struct.map_idx;
879 }
880 
881 static irqreturn_t bnge_msix(int irq, void *dev_instance)
882 {
883 	/* NAPI scheduling to be added in a future patch */
884 	return IRQ_HANDLED;
885 }
886 
887 static void bnge_init_nq_tree(struct bnge_net *bn)
888 {
889 	struct bnge_dev *bd = bn->bd;
890 	int i, j;
891 
892 	for (i = 0; i < bd->nq_nr_rings; i++) {
893 		struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
894 		struct bnge_ring_struct *ring = &nqr->ring_struct;
895 
896 		ring->fw_ring_id = INVALID_HW_RING_ID;
897 		for (j = 0; j < nqr->cp_ring_count; j++) {
898 			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
899 
900 			ring = &cpr->ring_struct;
901 			ring->fw_ring_id = INVALID_HW_RING_ID;
902 		}
903 	}
904 }
905 
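/* Allocate one aggregation buffer from the ring's page pool. When the
 * system page size is larger than BNGE_RX_PAGE_SIZE, a page fragment is
 * used so that several buffers share a page; the returned DMA address
 * includes the fragment offset.
 */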
906 static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
907 					 dma_addr_t *mapping,
908 					 struct bnge_rx_ring_info *rxr,
909 					 unsigned int *offset,
910 					 gfp_t gfp)
911 {
912 	netmem_ref netmem;
913 
914 	if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
915 		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
916 						     BNGE_RX_PAGE_SIZE, gfp);
917 	} else {
918 		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
919 		*offset = 0;
920 	}
921 	if (!netmem)
922 		return 0;
923 
924 	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
925 	return netmem;
926 }
927 
928 static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
929 				struct bnge_rx_ring_info *rxr,
930 				gfp_t gfp)
931 {
932 	unsigned int offset;
933 	struct page *page;
934 
935 	page = page_pool_alloc_frag(rxr->head_pool, &offset,
936 				    bn->rx_buf_size, gfp);
937 	if (!page)
938 		return NULL;
939 
940 	*mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
941 	return page_address(page) + offset;
942 }
943 
944 static int bnge_alloc_rx_data(struct bnge_net *bn,
945 			      struct bnge_rx_ring_info *rxr,
946 			      u16 prod, gfp_t gfp)
947 {
948 	struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
949 	struct rx_bd *rxbd;
950 	dma_addr_t mapping;
951 	u8 *data;
952 
953 	rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
954 	data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
955 	if (!data)
956 		return -ENOMEM;
957 
958 	rx_buf->data = data;
959 	rx_buf->data_ptr = data + bn->rx_offset;
960 	rx_buf->mapping = mapping;
961 
962 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
963 
964 	return 0;
965 }
966 
967 static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
968 				       struct bnge_rx_ring_info *rxr,
969 				       int ring_nr)
970 {
971 	u32 prod = rxr->rx_prod;
972 	int i, rc = 0;
973 
974 	for (i = 0; i < bn->rx_ring_size; i++) {
975 		rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
976 		if (rc)
977 			break;
978 		prod = NEXT_RX(prod);
979 	}
980 
981 	/* Abort if not a single buffer can be allocated */
982 	if (rc && !i) {
983 		netdev_err(bn->netdev,
984 			   "RX ring %d: allocated %d/%d buffers, abort\n",
985 			   ring_nr, i, bn->rx_ring_size);
986 		return rc;
987 	}
988 
989 	rxr->rx_prod = prod;
990 
991 	if (i < bn->rx_ring_size)
992 		netdev_warn(bn->netdev,
993 			    "RX ring %d: allocated %d/%d buffers, continuing\n",
994 			    ring_nr, i, bn->rx_ring_size);
995 	return 0;
996 }
997 
998 static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
999 {
1000 	u16 next, max = rxr->rx_agg_bmap_size;
1001 
1002 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1003 	if (next >= max)
1004 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1005 	return next;
1006 }
1007 
1008 static int bnge_alloc_rx_netmem(struct bnge_net *bn,
1009 				struct bnge_rx_ring_info *rxr,
1010 				u16 prod, gfp_t gfp)
1011 {
1012 	struct bnge_sw_rx_agg_bd *rx_agg_buf;
1013 	u16 sw_prod = rxr->rx_sw_agg_prod;
1014 	unsigned int offset = 0;
1015 	struct rx_bd *rxbd;
1016 	dma_addr_t mapping;
1017 	netmem_ref netmem;
1018 
1019 	rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
1020 	netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
1021 	if (!netmem)
1022 		return -ENOMEM;
1023 
1024 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1025 		sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
1026 
1027 	__set_bit(sw_prod, rxr->rx_agg_bmap);
1028 	rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
1029 	rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
1030 
1031 	rx_agg_buf->netmem = netmem;
1032 	rx_agg_buf->offset = offset;
1033 	rx_agg_buf->mapping = mapping;
1034 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1035 	rxbd->rx_bd_opaque = sw_prod;
1036 	return 0;
1037 }
1038 
1039 static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
1040 					struct bnge_rx_ring_info *rxr,
1041 					int ring_nr)
1042 {
1043 	u32 prod = rxr->rx_agg_prod;
1044 	int i, rc = 0;
1045 
1046 	for (i = 0; i < bn->rx_agg_ring_size; i++) {
1047 		rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
1048 		if (rc)
1049 			break;
1050 		prod = NEXT_RX_AGG(prod);
1051 	}
1052 
1053 	if (rc && i < MAX_SKB_FRAGS) {
1054 		netdev_err(bn->netdev,
1055 			   "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
1056 			   ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
1057 		goto err_free_one_agg_ring_bufs;
1058 	}
1059 
1060 	rxr->rx_agg_prod = prod;
1061 
1062 	if (i < bn->rx_agg_ring_size)
1063 		netdev_warn(bn->netdev,
1064 			    "Agg ring %d: allocated %d/%d buffers, continuing\n",
1065 			    ring_nr, i, bn->rx_agg_ring_size);
1066 	return 0;
1067 
1068 err_free_one_agg_ring_bufs:
1069 	bnge_free_one_agg_ring_bufs(bn, rxr);
1070 	return -ENOMEM;
1071 }
1072 
1073 static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
1074 {
1075 	struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
1076 	int rc;
1077 
1078 	rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
1079 	if (rc)
1080 		return rc;
1081 
1082 	if (bnge_is_agg_reqd(bn->bd)) {
1083 		rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
1084 		if (rc)
1085 			goto err_free_one_rx_ring_bufs;
1086 	}
1087 	return 0;
1088 
1089 err_free_one_rx_ring_bufs:
1090 	bnge_free_one_rx_ring_bufs(bn, rxr);
1091 	return rc;
1092 }
1093 
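/* Pre-initialize every RX BD in the ring pages with the buffer
 * type/length flags and an opaque index equal to its producer position.
 */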
1094 static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
1095 {
1096 	struct rx_bd **rx_desc_ring;
1097 	u32 prod;
1098 	int i;
1099 
1100 	rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
1101 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
1102 		struct rx_bd *rxbd = rx_desc_ring[i];
1103 		int j;
1104 
1105 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1106 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1107 			rxbd->rx_bd_opaque = prod;
1108 		}
1109 	}
1110 }
1111 
1112 static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
1113 				       struct bnge_rx_ring_info *rxr)
1114 {
1115 	struct bnge_ring_struct *ring;
1116 	u32 type;
1117 
1118 	type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1119 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1120 
1121 	if (NET_IP_ALIGN == 2)
1122 		type |= RX_BD_FLAGS_SOP;
1123 
1124 	ring = &rxr->rx_ring_struct;
1125 	bnge_init_rxbd_pages(ring, type);
1126 	ring->fw_ring_id = INVALID_HW_RING_ID;
1127 }
1128 
1129 static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
1130 					struct bnge_rx_ring_info *rxr)
1131 {
1132 	struct bnge_ring_struct *ring;
1133 	u32 type;
1134 
1135 	ring = &rxr->rx_agg_ring_struct;
1136 	ring->fw_ring_id = INVALID_HW_RING_ID;
1137 	if (bnge_is_agg_reqd(bn->bd)) {
1138 		type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
1139 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1140 
1141 		bnge_init_rxbd_pages(ring, type);
1142 	}
1143 }
1144 
1145 static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
1146 {
1147 	struct bnge_rx_ring_info *rxr;
1148 
1149 	rxr = &bn->rx_ring[ring_nr];
1150 	bnge_init_one_rx_ring_rxbd(bn, rxr);
1151 
1152 	netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
1153 			     &rxr->bnapi->napi);
1154 
1155 	bnge_init_one_agg_ring_rxbd(bn, rxr);
1156 }
1157 
1158 static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
1159 {
1160 	int i, rc;
1161 
1162 	for (i = 0; i < bn->bd->rx_nr_rings; i++) {
1163 		rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
1164 		if (rc)
1165 			goto err_free_rx_ring_pair_bufs;
1166 	}
1167 	return 0;
1168 
1169 err_free_rx_ring_pair_bufs:
1170 	bnge_free_rx_ring_pair_bufs(bn);
1171 	return rc;
1172 }
1173 
1174 static void bnge_init_rx_rings(struct bnge_net *bn)
1175 {
1176 	int i;
1177 
1178 #define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
1179 #define BNGE_RX_DMA_OFFSET NET_SKB_PAD
1180 	bn->rx_offset = BNGE_RX_OFFSET;
1181 	bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;
1182 
1183 	for (i = 0; i < bn->bd->rx_nr_rings; i++)
1184 		bnge_init_one_rx_ring_pair(bn, i);
1185 }
1186 
1187 static void bnge_init_tx_rings(struct bnge_net *bn)
1188 {
1189 	int i;
1190 
1191 	bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
1192 
1193 	for (i = 0; i < bn->bd->tx_nr_rings; i++) {
1194 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
1195 		struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1196 
1197 		ring->fw_ring_id = INVALID_HW_RING_ID;
1198 
1199 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
1200 				     &txr->bnapi->napi);
1201 	}
1202 }
1203 
1204 static void bnge_init_vnics(struct bnge_net *bn)
1205 {
1206 	struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1207 	int i;
1208 
1209 	for (i = 0; i < bn->nr_vnics; i++) {
1210 		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
1211 		int j;
1212 
1213 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
1214 		vnic->vnic_id = i;
1215 		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
1216 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
1217 
1218 		if (bn->vnic_info[i].rss_hash_key) {
1219 			if (i == BNGE_VNIC_DEFAULT) {
1220 				u8 *key = (void *)vnic->rss_hash_key;
1221 				int k;
1222 
1223 				if (!bn->rss_hash_key_valid &&
1224 				    !bn->rss_hash_key_updated) {
1225 					get_random_bytes(bn->rss_hash_key,
1226 							 HW_HASH_KEY_SIZE);
1227 					bn->rss_hash_key_updated = true;
1228 				}
1229 
1230 				memcpy(vnic->rss_hash_key, bn->rss_hash_key,
1231 				       HW_HASH_KEY_SIZE);
1232 
1233 				if (!bn->rss_hash_key_updated)
1234 					continue;
1235 
1236 				bn->rss_hash_key_updated = false;
1237 				bn->rss_hash_key_valid = true;
1238 
1239 				bn->toeplitz_prefix = 0;
1240 				for (k = 0; k < 8; k++) {
1241 					bn->toeplitz_prefix <<= 8;
1242 					bn->toeplitz_prefix |= key[k];
1243 				}
1244 			} else {
1245 				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
1246 				       HW_HASH_KEY_SIZE);
1247 			}
1248 		}
1249 	}
1250 }
1251 
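/* Doorbell setup: pick the ring mask for the ring type, derive the
 * epoch bit position from it, and build the 64-bit doorbell key (path,
 * type, ring XID, valid bit) along with the doorbell BAR address.
 */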
1252 static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
1253 			     u32 ring_type)
1254 {
1255 	switch (ring_type) {
1256 	case HWRM_RING_ALLOC_TX:
1257 		db->db_ring_mask = bn->tx_ring_mask;
1258 		break;
1259 	case HWRM_RING_ALLOC_RX:
1260 		db->db_ring_mask = bn->rx_ring_mask;
1261 		break;
1262 	case HWRM_RING_ALLOC_AGG:
1263 		db->db_ring_mask = bn->rx_agg_ring_mask;
1264 		break;
1265 	case HWRM_RING_ALLOC_CMPL:
1266 	case HWRM_RING_ALLOC_NQ:
1267 		db->db_ring_mask = bn->cp_ring_mask;
1268 		break;
1269 	}
1270 	db->db_epoch_mask = db->db_ring_mask + 1;
1271 	db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
1272 }
1273 
1274 static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
1275 			u32 ring_type, u32 map_idx, u32 xid)
1276 {
1277 	struct bnge_dev *bd = bn->bd;
1278 
1279 	switch (ring_type) {
1280 	case HWRM_RING_ALLOC_TX:
1281 		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
1282 		break;
1283 	case HWRM_RING_ALLOC_RX:
1284 	case HWRM_RING_ALLOC_AGG:
1285 		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
1286 		break;
1287 	case HWRM_RING_ALLOC_CMPL:
1288 		db->db_key64 = DBR_PATH_L2;
1289 		break;
1290 	case HWRM_RING_ALLOC_NQ:
1291 		db->db_key64 = DBR_PATH_L2;
1292 		break;
1293 	}
1294 	db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;
1295 
1296 	db->doorbell = bd->bar1 + bd->db_offset;
1297 	bnge_set_db_mask(bn, db, ring_type);
1298 }
1299 
1300 static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
1301 				   struct bnge_cp_ring_info *cpr)
1302 {
1303 	const u32 type = HWRM_RING_ALLOC_CMPL;
1304 	struct bnge_napi *bnapi = cpr->bnapi;
1305 	struct bnge_ring_struct *ring;
1306 	u32 map_idx = bnapi->index;
1307 	int rc;
1308 
1309 	ring = &cpr->ring_struct;
1310 	ring->handle = BNGE_SET_NQ_HDL(cpr);
1311 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1312 	if (rc)
1313 		return rc;
1314 
1315 	bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
1316 	bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
1317 
1318 	return 0;
1319 }
1320 
1321 static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
1322 				   struct bnge_tx_ring_info *txr, u32 tx_idx)
1323 {
1324 	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1325 	const u32 type = HWRM_RING_ALLOC_TX;
1326 	int rc;
1327 
1328 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
1329 	if (rc)
1330 		return rc;
1331 
1332 	bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
1333 
1334 	return 0;
1335 }
1336 
1337 static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
1338 				       struct bnge_rx_ring_info *rxr)
1339 {
1340 	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
1341 	u32 type = HWRM_RING_ALLOC_AGG;
1342 	struct bnge_dev *bd = bn->bd;
1343 	u32 grp_idx = ring->grp_idx;
1344 	u32 map_idx;
1345 	int rc;
1346 
1347 	map_idx = grp_idx + bd->rx_nr_rings;
1348 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1349 	if (rc)
1350 		return rc;
1351 
1352 	bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
1353 		    ring->fw_ring_id);
1354 	bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
1355 	bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
1356 	bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
1357 
1358 	return 0;
1359 }
1360 
1361 static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
1362 				   struct bnge_rx_ring_info *rxr)
1363 {
1364 	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
1365 	struct bnge_napi *bnapi = rxr->bnapi;
1366 	u32 type = HWRM_RING_ALLOC_RX;
1367 	u32 map_idx = bnapi->index;
1368 	int rc;
1369 
1370 	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1371 	if (rc)
1372 		return rc;
1373 
1374 	bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
1375 	bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
1376 
1377 	return 0;
1378 }
1379 
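/* Allocate all rings in firmware: NQs first (with their IRQ masked
 * while the doorbell is set up, and NQ0 registered as the async event
 * completion ring), then a completion ring and TX ring per TX queue,
 * then a completion ring and RX ring per RX queue, and finally the
 * aggregation rings when aggregation is required.
 */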
1380 static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
1381 {
1382 	struct bnge_dev *bd = bn->bd;
1383 	bool agg_rings;
1384 	int i, rc = 0;
1385 
1386 	agg_rings = !!(bnge_is_agg_reqd(bd));
1387 	for (i = 0; i < bd->nq_nr_rings; i++) {
1388 		struct bnge_napi *bnapi = bn->bnapi[i];
1389 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
1390 		struct bnge_ring_struct *ring = &nqr->ring_struct;
1391 		u32 type = HWRM_RING_ALLOC_NQ;
1392 		u32 map_idx = ring->map_idx;
1393 		unsigned int vector;
1394 
1395 		vector = bd->irq_tbl[map_idx].vector;
1396 		disable_irq_nosync(vector);
1397 		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1398 		if (rc) {
1399 			enable_irq(vector);
1400 			goto err_out;
1401 		}
1402 		bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
1403 		bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
1404 		enable_irq(vector);
1405 		bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;
1406 
1407 		if (!i) {
1408 			rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
1409 			if (rc)
1410 				netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
1411 		}
1412 	}
1413 
1414 	for (i = 0; i < bd->tx_nr_rings; i++) {
1415 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
1416 
1417 		rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
1418 		if (rc)
1419 			goto err_out;
1420 		rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
1421 		if (rc)
1422 			goto err_out;
1423 	}
1424 
1425 	for (i = 0; i < bd->rx_nr_rings; i++) {
1426 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
1427 		struct bnge_cp_ring_info *cpr;
1428 		struct bnge_ring_struct *ring;
1429 		struct bnge_napi *bnapi;
1430 		u32 map_idx, type;
1431 
1432 		rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
1433 		if (rc)
1434 			goto err_out;
1435 		/* With agg rings, write the RX doorbell only after agg buffers are posted. */
1436 		if (!agg_rings)
1437 			bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
1438 
1439 		cpr = rxr->rx_cpr;
1440 		bnapi = rxr->bnapi;
1441 		type = HWRM_RING_ALLOC_CMPL;
1442 		map_idx = bnapi->index;
1443 
1444 		ring = &cpr->ring_struct;
1445 		ring->handle = BNGE_SET_NQ_HDL(cpr);
1446 		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
1447 		if (rc)
1448 			goto err_out;
1449 		bnge_set_db(bn, &cpr->cp_db, type, map_idx,
1450 			    ring->fw_ring_id);
1451 		bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
1452 	}
1453 
1454 	if (agg_rings) {
1455 		for (i = 0; i < bd->rx_nr_rings; i++) {
1456 			rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
1457 			if (rc)
1458 				goto err_out;
1459 		}
1460 	}
1461 err_out:
1462 	return rc;
1463 }
1464 
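/* Fill the VNIC's RSS indirection table. Each entry is a pair of ring
 * IDs: the RX ring selected by the indirection table followed by its
 * companion completion ring.
 */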
1465 void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
1466 {
1467 	__le16 *ring_tbl = vnic->rss_table;
1468 	struct bnge_rx_ring_info *rxr;
1469 	struct bnge_dev *bd = bn->bd;
1470 	u16 tbl_size, i;
1471 
1472 	tbl_size = bnge_get_rxfh_indir_size(bd);
1473 
1474 	for (i = 0; i < tbl_size; i++) {
1475 		u16 ring_id, j;
1476 
1477 		j = bd->rss_indir_tbl[i];
1478 		rxr = &bn->rx_ring[j];
1479 
1480 		ring_id = rxr->rx_ring_struct.fw_ring_id;
1481 		*ring_tbl++ = cpu_to_le16(ring_id);
1482 		ring_id = bnge_cp_ring_for_rx(rxr);
1483 		*ring_tbl++ = cpu_to_le16(ring_id);
1484 	}
1485 }
1486 
1487 static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
1488 				  struct bnge_vnic_info *vnic)
1489 {
1490 	int rc;
1491 
1492 	rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
1493 	if (rc) {
1494 		netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
1495 			   vnic->vnic_id, rc);
1496 		return rc;
1497 	}
1498 	rc = bnge_hwrm_vnic_cfg(bn, vnic);
1499 	if (rc)
1500 		netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
1501 			   vnic->vnic_id, rc);
1502 	return rc;
1503 }
1504 
1505 static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
1506 {
1507 	struct bnge_dev *bd = bn->bd;
1508 	int rc, i, nr_ctxs;
1509 
1510 	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
1511 	for (i = 0; i < nr_ctxs; i++) {
1512 		rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
1513 		if (rc) {
1514 			netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
1515 				   vnic->vnic_id, i, rc);
1516 			return -ENOMEM;
1517 		}
1518 		bn->rsscos_nr_ctxs++;
1519 	}
1520 
1521 	rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
1522 	if (rc)
1523 		return rc;
1524 
1525 	if (bnge_is_agg_reqd(bd)) {
1526 		rc = bnge_hwrm_vnic_set_hds(bn, vnic);
1527 		if (rc)
1528 			netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
1529 				   vnic->vnic_id, rc);
1530 	}
1531 	return rc;
1532 }
1533 
1534 static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
1535 {
1536 	if (!refcount_dec_and_test(&fltr->refcnt))
1537 		return;
1538 	hlist_del_rcu(&fltr->base.hash);
1539 	kfree_rcu(fltr, base.rcu);
1540 }
1541 
1542 static void bnge_init_l2_filter(struct bnge_net *bn,
1543 				struct bnge_l2_filter *fltr,
1544 				struct bnge_l2_key *key, u32 idx)
1545 {
1546 	struct hlist_head *head;
1547 
1548 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
1549 	fltr->l2_key.vlan = key->vlan;
1550 	fltr->base.type = BNGE_FLTR_TYPE_L2;
1551 
1552 	head = &bn->l2_fltr_hash_tbl[idx];
1553 	hlist_add_head_rcu(&fltr->base.hash, head);
1554 	refcount_set(&fltr->refcnt, 1);
1555 }
1556 
1557 static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
1558 						      struct bnge_l2_key *key,
1559 						      u32 idx)
1560 {
1561 	struct bnge_l2_filter *fltr;
1562 	struct hlist_head *head;
1563 
1564 	head = &bn->l2_fltr_hash_tbl[idx];
1565 	hlist_for_each_entry_rcu(fltr, head, base.hash) {
1566 		struct bnge_l2_key *l2_key = &fltr->l2_key;
1567 
1568 		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
1569 		    l2_key->vlan == key->vlan)
1570 			return fltr;
1571 	}
1572 	return NULL;
1573 }
1574 
1575 static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
1576 						    struct bnge_l2_key *key,
1577 						    u32 idx)
1578 {
1579 	struct bnge_l2_filter *fltr;
1580 
1581 	rcu_read_lock();
1582 	fltr = __bnge_lookup_l2_filter(bn, key, idx);
1583 	if (fltr)
1584 		refcount_inc(&fltr->refcnt);
1585 	rcu_read_unlock();
1586 	return fltr;
1587 }
1588 
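/* Look up an L2 (MAC address + VLAN) filter by hash and take a
 * reference if it already exists; otherwise allocate a new filter and
 * insert it into the hash table with an initial refcount of one.
 */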
1589 static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
1590 						   struct bnge_l2_key *key,
1591 						   gfp_t gfp)
1592 {
1593 	struct bnge_l2_filter *fltr;
1594 	u32 idx;
1595 
1596 	idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
1597 	      BNGE_L2_FLTR_HASH_MASK;
1598 	fltr = bnge_lookup_l2_filter(bn, key, idx);
1599 	if (fltr)
1600 		return fltr;
1601 
1602 	fltr = kzalloc(sizeof(*fltr), gfp);
1603 	if (!fltr)
1604 		return ERR_PTR(-ENOMEM);
1605 
1606 	bnge_init_l2_filter(bn, fltr, key, idx);
1607 	return fltr;
1608 }
1609 
1610 static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
1611 				     const u8 *mac_addr)
1612 {
1613 	struct bnge_l2_filter *fltr;
1614 	struct bnge_l2_key key;
1615 	int rc;
1616 
1617 	ether_addr_copy(key.dst_mac_addr, mac_addr);
1618 	key.vlan = 0;
1619 	fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
1620 	if (IS_ERR(fltr))
1621 		return PTR_ERR(fltr);
1622 
1623 	fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
1624 	rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
1625 	if (rc)
1626 		goto err_del_l2_filter;
1627 	bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
1628 	return rc;
1629 
1630 err_del_l2_filter:
1631 	bnge_del_l2_filter(bn, fltr);
1632 	return rc;
1633 }
1634 
1635 static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
1636 {
1637 	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1638 	struct net_device *dev = bn->netdev;
1639 	struct netdev_hw_addr *ha;
1640 	int mc_count = 0, off = 0;
1641 	bool update = false;
1642 	u8 *haddr;
1643 
1644 	netdev_for_each_mc_addr(ha, dev) {
1645 		if (mc_count >= BNGE_MAX_MC_ADDRS) {
1646 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1647 			vnic->mc_list_count = 0;
1648 			return false;
1649 		}
1650 		haddr = ha->addr;
1651 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
1652 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
1653 			update = true;
1654 		}
1655 		off += ETH_ALEN;
1656 		mc_count++;
1657 	}
1658 	if (mc_count)
1659 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
1660 
1661 	if (mc_count != vnic->mc_list_count) {
1662 		vnic->mc_list_count = mc_count;
1663 		update = true;
1664 	}
1665 	return update;
1666 }
1667 
1668 static bool bnge_uc_list_updated(struct bnge_net *bn)
1669 {
1670 	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1671 	struct net_device *dev = bn->netdev;
1672 	struct netdev_hw_addr *ha;
1673 	int off = 0;
1674 
1675 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
1676 		return true;
1677 
1678 	netdev_for_each_uc_addr(ha, dev) {
1679 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
1680 			return true;
1681 
1682 		off += ETH_ALEN;
1683 	}
1684 	return false;
1685 }
1686 
1687 static bool bnge_promisc_ok(struct bnge_net *bn)
1688 {
1689 	return true;
1690 }
1691 
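/* Re-sync the default VNIC with the netdev address lists: drop and
 * re-install unicast filters when the UC list has changed (falling back
 * to promiscuous mode if there are more addresses than filters), then
 * program the RX mask, downgrading to ALL_MCAST if the multicast
 * filter update fails.
 */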
1692 static int bnge_cfg_def_vnic(struct bnge_net *bn)
1693 {
1694 	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1695 	struct net_device *dev = bn->netdev;
1696 	struct bnge_dev *bd = bn->bd;
1697 	struct netdev_hw_addr *ha;
1698 	int i, off = 0, rc;
1699 	bool uc_update;
1700 
1701 	netif_addr_lock_bh(dev);
1702 	uc_update = bnge_uc_list_updated(bn);
1703 	netif_addr_unlock_bh(dev);
1704 
1705 	if (!uc_update)
1706 		goto skip_uc;
1707 
1708 	for (i = 1; i < vnic->uc_filter_count; i++) {
1709 		struct bnge_l2_filter *fltr = vnic->l2_filters[i];
1710 
1711 		bnge_hwrm_l2_filter_free(bd, fltr);
1712 		bnge_del_l2_filter(bn, fltr);
1713 	}
1714 
1715 	vnic->uc_filter_count = 1;
1716 
1717 	netif_addr_lock_bh(dev);
1718 	if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
1719 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
1720 	} else {
1721 		netdev_for_each_uc_addr(ha, dev) {
1722 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
1723 			off += ETH_ALEN;
1724 			vnic->uc_filter_count++;
1725 		}
1726 	}
1727 	netif_addr_unlock_bh(dev);
1728 
1729 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
1730 		rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
1731 		if (rc) {
1732 			netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
1733 			vnic->uc_filter_count = i;
1734 			return rc;
1735 		}
1736 	}
1737 
1738 skip_uc:
1739 	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
1740 	    !bnge_promisc_ok(bn))
1741 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
1742 	rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
1743 	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
1744 		netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
1745 			    rc);
1746 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
1747 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1748 		vnic->mc_list_count = 0;
1749 		rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
1750 	}
1751 	if (rc)
1752 		netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
1753 			   rc);
1754 
1755 	return rc;
1756 }
1757 
1758 static void bnge_hwrm_vnic_free(struct bnge_net *bn)
1759 {
1760 	int i;
1761 
1762 	for (i = 0; i < bn->nr_vnics; i++)
1763 		bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
1764 }
1765 
1766 static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
1767 {
1768 	int i, j;
1769 
1770 	for (i = 0; i < bn->nr_vnics; i++) {
1771 		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
1772 
1773 		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
1774 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
1775 				bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
1776 		}
1777 	}
1778 	bn->rsscos_nr_ctxs = 0;
1779 }
1780 
1781 static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
1782 {
1783 	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1784 	int i;
1785 
1786 	for (i = 0; i < vnic->uc_filter_count; i++) {
1787 		struct bnge_l2_filter *fltr = vnic->l2_filters[i];
1788 
1789 		bnge_hwrm_l2_filter_free(bn->bd, fltr);
1790 		bnge_del_l2_filter(bn, fltr);
1791 	}
1792 
1793 	vnic->uc_filter_count = 0;
1794 }
1795 
1796 static void bnge_clear_vnic(struct bnge_net *bn)
1797 {
1798 	bnge_hwrm_clear_vnic_filter(bn);
1799 	bnge_hwrm_vnic_free(bn);
1800 	bnge_hwrm_vnic_ctx_free(bn);
1801 }
1802 
1803 static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
1804 				   struct bnge_rx_ring_info *rxr,
1805 				   bool close_path)
1806 {
1807 	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
1808 	u32 grp_idx = rxr->bnapi->index;
1809 	u32 cmpl_ring_id;
1810 
1811 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
1812 		return;
1813 
1814 	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
1815 	hwrm_ring_free_send_msg(bn, ring,
1816 				RING_FREE_REQ_RING_TYPE_RX,
1817 				close_path ? cmpl_ring_id :
1818 				INVALID_HW_RING_ID);
1819 	ring->fw_ring_id = INVALID_HW_RING_ID;
1820 	bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1821 }
1822 
1823 static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
1824 				       struct bnge_rx_ring_info *rxr,
1825 				       bool close_path)
1826 {
1827 	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
1828 	u32 grp_idx = rxr->bnapi->index;
1829 	u32 cmpl_ring_id;
1830 
1831 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
1832 		return;
1833 
1834 	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
1835 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
1836 				close_path ? cmpl_ring_id :
1837 				INVALID_HW_RING_ID);
1838 	ring->fw_ring_id = INVALID_HW_RING_ID;
1839 	bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
1840 }
1841 
1842 static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
1843 				   struct bnge_tx_ring_info *txr,
1844 				   bool close_path)
1845 {
1846 	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1847 	u32 cmpl_ring_id;
1848 
1849 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
1850 		return;
1851 
1852 	cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
1853 		       INVALID_HW_RING_ID;
1854 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
1855 				cmpl_ring_id);
1856 	ring->fw_ring_id = INVALID_HW_RING_ID;
1857 }
1858 
1859 static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
1860 				   struct bnge_cp_ring_info *cpr)
1861 {
1862 	struct bnge_ring_struct *ring;
1863 
1864 	ring = &cpr->ring_struct;
1865 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
1866 		return;
1867 
1868 	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
1869 				INVALID_HW_RING_ID);
1870 	ring->fw_ring_id = INVALID_HW_RING_ID;
1871 }
1872 
1873 static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
1874 {
1875 	struct bnge_dev *bd = bn->bd;
1876 	int i;
1877 
1878 	if (!bn->bnapi)
1879 		return;
1880 
1881 	for (i = 0; i < bd->tx_nr_rings; i++)
1882 		bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);
1883 
1884 	for (i = 0; i < bd->rx_nr_rings; i++) {
1885 		bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
1886 		bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
1887 	}
1888 
1889 	for (i = 0; i < bd->nq_nr_rings; i++) {
1890 		struct bnge_napi *bnapi = bn->bnapi[i];
1891 		struct bnge_nq_ring_info *nqr;
1892 		struct bnge_ring_struct *ring;
1893 		int j;
1894 
1895 		nqr = &bnapi->nq_ring;
1896 		for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
1897 			bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);
1898 
1899 		ring = &nqr->ring_struct;
1900 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1901 			hwrm_ring_free_send_msg(bn, ring,
1902 						RING_FREE_REQ_RING_TYPE_NQ,
1903 						INVALID_HW_RING_ID);
1904 			ring->fw_ring_id = INVALID_HW_RING_ID;
1905 			bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
1906 		}
1907 	}
1908 }
1909 
1910 static void bnge_setup_msix(struct bnge_net *bn)
1911 {
1912 	struct net_device *dev = bn->netdev;
1913 	struct bnge_dev *bd = bn->bd;
1914 	int len, i;
1915 
1916 	len = sizeof(bd->irq_tbl[0].name);
1917 	for (i = 0; i < bd->nq_nr_rings; i++) {
1918 		int map_idx = bnge_cp_num_to_irq_num(bn, i);
1919 		char *attr;
1920 
1921 		if (bd->flags & BNGE_EN_SHARED_CHNL)
1922 			attr = "TxRx";
1923 		else if (i < bd->rx_nr_rings)
1924 			attr = "rx";
1925 		else
1926 			attr = "tx";
1927 
1928 		snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
1929 			 attr, i);
1930 		bd->irq_tbl[map_idx].handler = bnge_msix;
1931 	}
1932 }
1933 
1934 static int bnge_setup_interrupts(struct bnge_net *bn)
1935 {
1936 	struct net_device *dev = bn->netdev;
1937 	struct bnge_dev *bd = bn->bd;
1938 
1939 	bnge_setup_msix(bn);
1940 
1941 	return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
1942 }
1943 
1944 static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
1945 {
1946 	bnge_clear_vnic(bn);
1947 	bnge_hwrm_ring_free(bn, close_path);
1948 	bnge_hwrm_stat_ctx_free(bn);
1949 }
1950 
1951 static void bnge_free_irq(struct bnge_net *bn)
1952 {
1953 	struct bnge_dev *bd = bn->bd;
1954 	struct bnge_irq *irq;
1955 	int i;
1956 
1957 	for (i = 0; i < bd->nq_nr_rings; i++) {
1958 		int map_idx = bnge_cp_num_to_irq_num(bn, i);
1959 
1960 		irq = &bd->irq_tbl[map_idx];
1961 		if (irq->requested) {
1962 			if (irq->have_cpumask) {
1963 				irq_set_affinity_hint(irq->vector, NULL);
1964 				free_cpumask_var(irq->cpu_mask);
1965 				irq->have_cpumask = 0;
1966 			}
1967 			free_irq(irq->vector, bn->bnapi[i]);
1968 		}
1969 
1970 		irq->requested = 0;
1971 	}
1972 }
1973 
1974 static int bnge_request_irq(struct bnge_net *bn)
1975 {
1976 	struct bnge_dev *bd = bn->bd;
1977 	int i, rc;
1978 
1979 	rc = bnge_setup_interrupts(bn);
1980 	if (rc) {
1981 		netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
1982 		return rc;
1983 	}
1984 	for (i = 0; i < bd->nq_nr_rings; i++) {
1985 		int map_idx = bnge_cp_num_to_irq_num(bn, i);
1986 		struct bnge_irq *irq = &bd->irq_tbl[map_idx];
1987 
1988 		rc = request_irq(irq->vector, irq->handler, 0, irq->name,
1989 				 bn->bnapi[i]);
1990 		if (rc)
1991 			goto err_free_irq;
1992 
1993 		netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
1994 		irq->requested = 1;
1995 
1996 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
1997 			int numa_node = dev_to_node(&bd->pdev->dev);
1998 
1999 			irq->have_cpumask = 1;
2000 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2001 					irq->cpu_mask);
2002 			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
2003 			if (rc) {
2004 				netdev_warn(bn->netdev,
2005 					    "Set affinity failed, IRQ = %d\n",
2006 					    irq->vector);
2007 				goto err_free_irq;
2008 			}
2009 		}
2010 	}
2011 	return 0;
2012 
2013 err_free_irq:
2014 	bnge_free_irq(bn);
2015 	return rc;
2016 }
2017 
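/* Firmware-side bring-up: allocate stats contexts and rings, create
 * and configure the default VNIC (RSS and, if needed, HDS), install an
 * L2 filter for the netdev MAC address, and program the RX mask based
 * on the netdev flags.
 */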
2018 static int bnge_init_chip(struct bnge_net *bn)
2019 {
2020 	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
2021 	struct bnge_dev *bd = bn->bd;
2022 	int rc;
2023 
2024 #define BNGE_DEF_STATS_COAL_TICKS	 1000000
2025 	bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;
2026 
2027 	rc = bnge_hwrm_stat_ctx_alloc(bn);
2028 	if (rc) {
2029 		netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
2030 		goto err_out;
2031 	}
2032 
2033 	rc = bnge_hwrm_ring_alloc(bn);
2034 	if (rc) {
2035 		netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
2036 		goto err_out;
2037 	}
2038 
2039 	rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
2040 	if (rc) {
2041 		netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
2042 		goto err_out;
2043 	}
2044 
2045 	rc = bnge_setup_vnic(bn, vnic);
2046 	if (rc)
2047 		goto err_out;
2048 
2049 	if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
2050 		bnge_hwrm_update_rss_hash_cfg(bn);
2051 
2052 	/* Install the unicast MAC filter on the default VNIC (index 0) */
2053 	rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
2054 	if (rc) {
2055 		netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
2056 		goto err_out;
2057 	}
2058 	vnic->uc_filter_count = 1;
2059 
2060 	vnic->rx_mask = 0;
2061 
2062 	if (bn->netdev->flags & IFF_BROADCAST)
2063 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
2064 
2065 	if (bn->netdev->flags & IFF_PROMISC)
2066 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
2067 
2068 	if (bn->netdev->flags & IFF_ALLMULTI) {
2069 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
2070 		vnic->mc_list_count = 0;
2071 	} else if (bn->netdev->flags & IFF_MULTICAST) {
2072 		u32 mask = 0;
2073 
2074 		bnge_mc_list_updated(bn, &mask);
2075 		vnic->rx_mask |= mask;
2076 	}
2077 
2078 	rc = bnge_cfg_def_vnic(bn);
2079 	if (rc)
2080 		goto err_out;
2081 	return 0;
2082 
2083 err_out:
2084 	bnge_hwrm_resource_free(bn, 0);
2085 	return rc;
2086 }
2087 
2088 static int bnge_napi_poll(struct napi_struct *napi, int budget)
2089 {
2090 	int work_done = 0;
2091 
2092 	/* NAPI poll handling is deferred to a later patch series */
2093 	napi_complete_done(napi, work_done);
2094 
2095 	return work_done;
2096 }
2097 
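/* Register one NAPI context per NQ ring with the stack. */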
2098 static void bnge_init_napi(struct bnge_net *bn)
2099 {
2100 	struct bnge_dev *bd = bn->bd;
2101 	struct bnge_napi *bnapi;
2102 	int i;
2103 
2104 	for (i = 0; i < bd->nq_nr_rings; i++) {
2105 		bnapi = bn->bnapi[i];
2106 		netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
2107 					     bnge_napi_poll, bnapi->index);
2108 	}
2109 }
2110 
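/* Detach RX/TX queues from their NAPI contexts and delete all NAPI
 * instances.
 */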
2111 static void bnge_del_napi(struct bnge_net *bn)
2112 {
2113 	struct bnge_dev *bd = bn->bd;
2114 	int i;
2115 
2116 	for (i = 0; i < bd->rx_nr_rings; i++)
2117 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
2118 	for (i = 0; i < bd->tx_nr_rings; i++)
2119 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
2120 
2121 	for (i = 0; i < bd->nq_nr_rings; i++) {
2122 		struct bnge_napi *bnapi = bn->bnapi[i];
2123 
2124 		__netif_napi_del_locked(&bnapi->napi);
2125 	}
2126 
2127 	/* Wait for RCU grace period after removing NAPI instances */
2128 	synchronize_net();
2129 }
2130 
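/* Initialize the software ring state (NQ, RX, TX), fill the RX ring pairs
 * with buffers, set up ring groups and VNICs, then bring the chip up via
 * bnge_init_chip().
 */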
2131 static int bnge_init_nic(struct bnge_net *bn)
2132 {
2133 	int rc;
2134 
2135 	bnge_init_nq_tree(bn);
2136 
2137 	bnge_init_rx_rings(bn);
2138 	rc = bnge_alloc_rx_ring_pair_bufs(bn);
2139 	if (rc)
2140 		return rc;
2141 
2142 	bnge_init_tx_rings(bn);
2143 
2144 	rc = bnge_init_ring_grps(bn);
2145 	if (rc)
2146 		goto err_free_rx_ring_pair_bufs;
2147 
2148 	bnge_init_vnics(bn);
2149 
2150 	rc = bnge_init_chip(bn);
2151 	if (rc)
2152 		goto err_free_ring_grps;
2153 	return rc;
2154 
2155 err_free_ring_grps:
2156 	bnge_free_ring_grps(bn);
2157 	return rc;
2158 
2159 err_free_rx_ring_pair_bufs:
2160 	bnge_free_rx_ring_pair_bufs(bn);
2161 	return rc;
2162 }
2163 
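/* Core open path: reserve ring resources, allocate core driver state,
 * add NAPI, request IRQs and initialize the NIC before marking the
 * device open (BNGE_STATE_OPEN).
 */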
2164 static int bnge_open_core(struct bnge_net *bn)
2165 {
2166 	struct bnge_dev *bd = bn->bd;
2167 	int rc;
2168 
2169 	netif_carrier_off(bn->netdev);
2170 
2171 	rc = bnge_reserve_rings(bd);
2172 	if (rc) {
2173 		netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
2174 		return rc;
2175 	}
2176 
2177 	rc = bnge_alloc_core(bn);
2178 	if (rc) {
2179 		netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
2180 		return rc;
2181 	}
2182 
2183 	bnge_init_napi(bn);
2184 	rc = bnge_request_irq(bn);
2185 	if (rc) {
2186 		netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
2187 		goto err_del_napi;
2188 	}
2189 
2190 	rc = bnge_init_nic(bn);
2191 	if (rc) {
2192 		netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
2193 		goto err_free_irq;
2194 	}
2195 	set_bit(BNGE_STATE_OPEN, &bd->state);
2196 	return 0;
2197 
2198 err_free_irq:
2199 	bnge_free_irq(bn);
2200 err_del_napi:
2201 	bnge_del_napi(bn);
2202 	bnge_free_core(bn);
2203 	return rc;
2204 }
2205 
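/* Stub transmit handler: the skb is freed and reported as sent. */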
2206 static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
2207 {
2208 	dev_kfree_skb_any(skb);
2209 
2210 	return NETDEV_TX_OK;
2211 }
2212 
2213 static int bnge_open(struct net_device *dev)
2214 {
2215 	struct bnge_net *bn = netdev_priv(dev);
2216 	int rc;
2217 
2218 	rc = bnge_open_core(bn);
2219 	if (rc)
2220 		netdev_err(dev, "bnge_open_core err: %d\n", rc);
2221 
2222 	return rc;
2223 }
2224 
2225 static int bnge_shutdown_nic(struct bnge_net *bn)
2226 {
2227 	/* TODO: close_path = 0 until we make NAPI functional */
2228 	bnge_hwrm_resource_free(bn, 0);
2229 	return 0;
2230 }
2231 
2232 static void bnge_close_core(struct bnge_net *bn)
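/* Core close path, the counterpart of bnge_open_core(): clear the OPEN
 * state, free firmware resources and ring buffers, release IRQs and NAPI,
 * then free the core driver state.
 */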
2233 {
2234 	struct bnge_dev *bd = bn->bd;
2235 
2236 	clear_bit(BNGE_STATE_OPEN, &bd->state);
2237 	bnge_shutdown_nic(bn);
2238 	bnge_free_all_rings_bufs(bn);
2239 	bnge_free_irq(bn);
2240 	bnge_del_napi(bn);
2241 
2242 	bnge_free_core(bn);
2243 }
2244 
2245 static int bnge_close(struct net_device *dev)
2246 {
2247 	struct bnge_net *bn = netdev_priv(dev);
2248 
2249 	bnge_close_core(bn);
2250 
2251 	return 0;
2252 }
2253 
2254 static const struct net_device_ops bnge_netdev_ops = {
2255 	.ndo_open		= bnge_open,
2256 	.ndo_stop		= bnge_close,
2257 	.ndo_start_xmit		= bnge_start_xmit,
2258 };
2259 
2260 static void bnge_init_mac_addr(struct bnge_dev *bd)
2261 {
2262 	eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
2263 }
2264 
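/* Select the TPA mode from the netdev features: LRO when it is enabled,
 * otherwise hardware GRO.
 */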
2265 static void bnge_set_tpa_flags(struct bnge_dev *bd)
2266 {
2267 	struct bnge_net *bn = netdev_priv(bd->netdev);
2268 
2269 	bn->priv_flags &= ~BNGE_NET_EN_TPA;
2270 
2271 	if (bd->netdev->features & NETIF_F_LRO)
2272 		bn->priv_flags |= BNGE_NET_EN_LRO;
2273 	else if (bd->netdev->features & NETIF_F_GRO_HW)
2274 		bn->priv_flags |= BNGE_NET_EN_GRO;
2275 }
2276 
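/* Initialize the L2 filter hash table heads and seed the hash function. */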
2277 static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
2278 {
2279 	int i;
2280 
2281 	for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
2282 		INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
2283 	get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
2284 }
2285 
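/* Derive buffer and ring geometry from the current MTU and requested ring
 * sizes: RX buffer data size and SKB space, optional aggregation ring
 * sizing for TPA/jumbo frames, and the page counts and index masks for the
 * RX, TX and completion rings.  As a rough example, with a 1500-byte MTU
 * the initial rx_size comes out to 1536 bytes (1500 + ETH_HLEN +
 * NET_IP_ALIGN + 8, rounded up by SKB_DATA_ALIGN() assuming 64-byte
 * SMP_CACHE_BYTES).
 */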
2286 void bnge_set_ring_params(struct bnge_dev *bd)
2287 {
2288 	struct bnge_net *bn = netdev_priv(bd->netdev);
2289 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
2290 	u32 agg_factor = 0, agg_ring_size = 0;
2291 
2292 	/* Extra 8 bytes: 4-byte FCS (CRC) plus a 4-byte VLAN tag */
2293 	rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2294 
2295 	rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
2296 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2297 
2298 	bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
2299 	ring_size = bn->rx_ring_size;
2300 	bn->rx_agg_ring_size = 0;
2301 	bn->rx_agg_nr_pages = 0;
2302 
2303 	if (bn->priv_flags & BNGE_NET_EN_TPA)
2304 		agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);
2305 
2306 	bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
2307 	if (rx_space > PAGE_SIZE) {
2308 		u32 jumbo_factor;
2309 
2310 		bn->priv_flags |= BNGE_NET_EN_JUMBO;
2311 		jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
2312 		if (jumbo_factor > agg_factor)
2313 			agg_factor = jumbo_factor;
2314 	}
2315 	if (agg_factor) {
2316 		if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
2317 			ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
2318 			netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
2319 				    bn->rx_ring_size, ring_size);
2320 			bn->rx_ring_size = ring_size;
2321 		}
2322 		agg_ring_size = ring_size * agg_factor;
2323 
2324 		bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
2325 							  RX_DESC_CNT);
2326 		if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2327 			u32 tmp = agg_ring_size;
2328 
2329 			bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2330 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2331 			netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
2332 				    tmp, agg_ring_size);
2333 		}
2334 		bn->rx_agg_ring_size = agg_ring_size;
2335 		bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2336 
2337 		rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
2338 		rx_space = rx_size + NET_SKB_PAD +
2339 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2340 	}
2341 
2342 	bn->rx_buf_use_size = rx_size;
2343 	bn->rx_buf_size = rx_space;
2344 
2345 	bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
2346 	bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;
2347 
2348 	ring_size = bn->tx_ring_size;
2349 	bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
2350 	bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;
2351 
2352 	max_rx_cmpl = bn->rx_ring_size;
2353 
2354 	if (bn->priv_flags & BNGE_NET_EN_TPA)
2355 		max_rx_cmpl += bd->max_tpa_v2;
2356 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
2357 	bn->cp_ring_size = ring_size;
2358 
2359 	bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
2360 	if (bn->cp_nr_pages > MAX_CP_PAGES) {
2361 		bn->cp_nr_pages = MAX_CP_PAGES;
2362 		bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2363 		netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
2364 			    ring_size, bn->cp_ring_size);
2365 	}
2366 	bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
2367 	bn->cp_ring_mask = bn->cp_bit - 1;
2368 }
2369 
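/* Allocate and register the net_device: install netdev and ethtool ops,
 * advertise offload features, pick default ring sizes and derive the ring
 * geometry before register_netdev().
 */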
2370 int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
2371 {
2372 	struct net_device *netdev;
2373 	struct bnge_net *bn;
2374 	int rc;
2375 
2376 	netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
2377 				    max_irqs);
2378 	if (!netdev)
2379 		return -ENOMEM;
2380 
2381 	SET_NETDEV_DEV(netdev, bd->dev);
2382 	bd->netdev = netdev;
2383 
2384 	netdev->netdev_ops = &bnge_netdev_ops;
2385 
2386 	bnge_set_ethtool_ops(netdev);
2387 
2388 	bn = netdev_priv(netdev);
2389 	bn->netdev = netdev;
2390 	bn->bd = bd;
2391 
2392 	netdev->min_mtu = ETH_ZLEN;
2393 	netdev->max_mtu = bd->max_mtu;
2394 
2395 	netdev->hw_features = NETIF_F_IP_CSUM |
2396 			      NETIF_F_IPV6_CSUM |
2397 			      NETIF_F_SG |
2398 			      NETIF_F_TSO |
2399 			      NETIF_F_TSO6 |
2400 			      NETIF_F_GSO_UDP_TUNNEL |
2401 			      NETIF_F_GSO_GRE |
2402 			      NETIF_F_GSO_IPXIP4 |
2403 			      NETIF_F_GSO_UDP_TUNNEL_CSUM |
2404 			      NETIF_F_GSO_GRE_CSUM |
2405 			      NETIF_F_GSO_PARTIAL |
2406 			      NETIF_F_RXHASH |
2407 			      NETIF_F_RXCSUM |
2408 			      NETIF_F_GRO;
2409 
2410 	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
2411 		netdev->hw_features |= NETIF_F_GSO_UDP_L4;
2412 
2413 	if (BNGE_SUPPORTS_TPA(bd))
2414 		netdev->hw_features |= NETIF_F_LRO;
2415 
2416 	netdev->hw_enc_features = NETIF_F_IP_CSUM |
2417 				  NETIF_F_IPV6_CSUM |
2418 				  NETIF_F_SG |
2419 				  NETIF_F_TSO |
2420 				  NETIF_F_TSO6 |
2421 				  NETIF_F_GSO_UDP_TUNNEL |
2422 				  NETIF_F_GSO_GRE |
2423 				  NETIF_F_GSO_UDP_TUNNEL_CSUM |
2424 				  NETIF_F_GSO_GRE_CSUM |
2425 				  NETIF_F_GSO_IPXIP4 |
2426 				  NETIF_F_GSO_PARTIAL;
2427 
2428 	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
2429 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
2430 
2431 	netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
2432 				       NETIF_F_GSO_GRE_CSUM;
2433 
2434 	netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
2435 	if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
2436 		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
2437 	if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
2438 		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;
2439 
2440 	if (BNGE_SUPPORTS_TPA(bd))
2441 		netdev->hw_features |= NETIF_F_GRO_HW;
2442 
2443 	netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
2444 
2445 	if (netdev->features & NETIF_F_GRO_HW)
2446 		netdev->features &= ~NETIF_F_LRO;
2447 
2448 	netdev->priv_flags |= IFF_UNICAST_FLT;
2449 
2450 	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
2451 	if (bd->tso_max_segs)
2452 		netif_set_tso_max_segs(netdev, bd->tso_max_segs);
2453 
2454 	bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
2455 	bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
2456 	bn->rx_dir = DMA_FROM_DEVICE;
2457 
2458 	bnge_set_tpa_flags(bd);
2459 	bnge_set_ring_params(bd);
2460 
2461 	bnge_init_l2_fltr_tbl(bn);
2462 	bnge_init_mac_addr(bd);
2463 
2464 	netdev->request_ops_lock = true;
2465 	rc = register_netdev(netdev);
2466 	if (rc) {
2467 		dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
2468 		goto err_netdev;
2469 	}
2470 
2471 	return 0;
2472 
2473 err_netdev:
2474 	free_netdev(netdev);
2475 	return rc;
2476 }
2477 
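/* Unregister and free the net_device created by bnge_netdev_alloc(). */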
2478 void bnge_netdev_free(struct bnge_dev *bd)
2479 {
2480 	struct net_device *netdev = bd->netdev;
2481 
2482 	unregister_netdev(netdev);
2483 	free_netdev(netdev);
2484 	bd->netdev = NULL;
2485 }
2486