xref: /linux/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2025 Broadcom.
3 
4 #include <asm/byteorder.h>
5 #include <linux/dma-mapping.h>
6 #include <linux/dmapool.h>
7 #include <linux/delay.h>
8 #include <linux/errno.h>
9 #include <linux/kernel.h>
10 #include <linux/list.h>
11 #include <linux/pci.h>
12 #include <linux/netdevice.h>
13 #include <net/netdev_lock.h>
14 #include <net/netdev_queues.h>
15 #include <net/netdev_rx_queue.h>
16 #include <linux/etherdevice.h>
17 #include <linux/if.h>
18 #include <net/ip.h>
19 #include <linux/skbuff.h>
20 #include <net/page_pool/helpers.h>
21 
22 #include "bnge.h"
23 #include "bnge_hwrm_lib.h"
24 #include "bnge_ethtool.h"
25 #include "bnge_rmem.h"
26 #include "bnge_txrx.h"
27 
/* Offset of TX ring @tx within its traffic class. */
#define BNGE_RING_TO_TC_OFF(bd, tx)	\
	((tx) % (bd)->tx_nr_rings_per_tc)

/* Traffic class that TX ring @tx belongs to. */
#define BNGE_RING_TO_TC(bd, tx)		\
	((tx) / (bd)->tx_nr_rings_per_tc)

/* First TX ring index of traffic class @tc. */
#define BNGE_TC_TO_RING_BASE(bd, tc)	\
	((tc) * (bd)->tx_nr_rings_per_tc)
36 
37 static void bnge_free_stats_mem(struct bnge_net *bn,
38 				struct bnge_stats_mem *stats)
39 {
40 	struct bnge_dev *bd = bn->bd;
41 
42 	if (stats->hw_stats) {
43 		dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
44 				  stats->hw_stats_map);
45 		stats->hw_stats = NULL;
46 	}
47 }
48 
49 static int bnge_alloc_stats_mem(struct bnge_net *bn,
50 				struct bnge_stats_mem *stats)
51 {
52 	struct bnge_dev *bd = bn->bd;
53 
54 	stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
55 					     &stats->hw_stats_map, GFP_KERNEL);
56 	if (!stats->hw_stats)
57 		return -ENOMEM;
58 
59 	return 0;
60 }
61 
62 static void bnge_free_ring_stats(struct bnge_net *bn)
63 {
64 	struct bnge_dev *bd = bn->bd;
65 	int i;
66 
67 	if (!bn->bnapi)
68 		return;
69 
70 	for (i = 0; i < bd->nq_nr_rings; i++) {
71 		struct bnge_napi *bnapi = bn->bnapi[i];
72 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
73 
74 		bnge_free_stats_mem(bn, &nqr->stats);
75 	}
76 }
77 
78 static int bnge_alloc_ring_stats(struct bnge_net *bn)
79 {
80 	struct bnge_dev *bd = bn->bd;
81 	u32 size, i;
82 	int rc;
83 
84 	size = bd->hw_ring_stats_size;
85 
86 	for (i = 0; i < bd->nq_nr_rings; i++) {
87 		struct bnge_napi *bnapi = bn->bnapi[i];
88 		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
89 
90 		nqr->stats.len = size;
91 		rc = bnge_alloc_stats_mem(bn, &nqr->stats);
92 		if (rc)
93 			goto err_free_ring_stats;
94 
95 		nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
96 	}
97 	return 0;
98 
99 err_free_ring_stats:
100 	bnge_free_ring_stats(bn);
101 	return rc;
102 }
103 
104 static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
105 {
106 	struct bnge_ring_struct *ring = &nqr->ring_struct;
107 
108 	kfree(nqr->desc_ring);
109 	nqr->desc_ring = NULL;
110 	ring->ring_mem.pg_arr = NULL;
111 	kfree(nqr->desc_mapping);
112 	nqr->desc_mapping = NULL;
113 	ring->ring_mem.dma_arr = NULL;
114 }
115 
116 static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
117 {
118 	struct bnge_ring_struct *ring = &cpr->ring_struct;
119 
120 	kfree(cpr->desc_ring);
121 	cpr->desc_ring = NULL;
122 	ring->ring_mem.pg_arr = NULL;
123 	kfree(cpr->desc_mapping);
124 	cpr->desc_mapping = NULL;
125 	ring->ring_mem.dma_arr = NULL;
126 }
127 
128 static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
129 {
130 	nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
131 	if (!nqr->desc_ring)
132 		return -ENOMEM;
133 
134 	nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
135 	if (!nqr->desc_mapping)
136 		goto err_free_desc_ring;
137 	return 0;
138 
139 err_free_desc_ring:
140 	kfree(nqr->desc_ring);
141 	nqr->desc_ring = NULL;
142 	return -ENOMEM;
143 }
144 
145 static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
146 {
147 	cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
148 	if (!cpr->desc_ring)
149 		return -ENOMEM;
150 
151 	cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
152 	if (!cpr->desc_mapping)
153 		goto err_free_desc_ring;
154 	return 0;
155 
156 err_free_desc_ring:
157 	kfree(cpr->desc_ring);
158 	cpr->desc_ring = NULL;
159 	return -ENOMEM;
160 }
161 
162 static void bnge_free_nq_arrays(struct bnge_net *bn)
163 {
164 	struct bnge_dev *bd = bn->bd;
165 	int i;
166 
167 	for (i = 0; i < bd->nq_nr_rings; i++) {
168 		struct bnge_napi *bnapi = bn->bnapi[i];
169 
170 		bnge_free_nq_desc_arr(&bnapi->nq_ring);
171 	}
172 }
173 
174 static int bnge_alloc_nq_arrays(struct bnge_net *bn)
175 {
176 	struct bnge_dev *bd = bn->bd;
177 	int i, rc;
178 
179 	for (i = 0; i < bd->nq_nr_rings; i++) {
180 		struct bnge_napi *bnapi = bn->bnapi[i];
181 
182 		rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
183 		if (rc)
184 			goto err_free_nq_arrays;
185 	}
186 	return 0;
187 
188 err_free_nq_arrays:
189 	bnge_free_nq_arrays(bn);
190 	return rc;
191 }
192 
/* Tear down the NQ tree: for each NQ free its ring pages, then free each
 * child completion ring's pages and software descriptor arrays, and
 * finally the CP ring array itself.
 */
static void bnge_free_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		nqr = &bnapi->nq_ring;
		ring = &nqr->ring_struct;

		bnge_free_ring(bd, &ring->ring_mem);

		/* NQs past the point of an allocation failure have no
		 * CP ring array yet.
		 */
		if (!nqr->cp_ring_arr)
			continue;

		for (j = 0; j < nqr->cp_ring_count; j++) {
			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];

			ring = &cpr->ring_struct;
			bnge_free_ring(bd, &ring->ring_mem);
			bnge_free_cp_desc_arr(cpr);
		}
		kfree(nqr->cp_ring_arr);
		nqr->cp_ring_arr = NULL;
		nqr->cp_ring_count = 0;
	}
}
224 
225 static int alloc_one_cp_ring(struct bnge_net *bn,
226 			     struct bnge_cp_ring_info *cpr)
227 {
228 	struct bnge_ring_mem_info *rmem;
229 	struct bnge_ring_struct *ring;
230 	struct bnge_dev *bd = bn->bd;
231 	int rc;
232 
233 	rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
234 	if (rc)
235 		return -ENOMEM;
236 	ring = &cpr->ring_struct;
237 	rmem = &ring->ring_mem;
238 	rmem->nr_pages = bn->cp_nr_pages;
239 	rmem->page_size = HW_CMPD_RING_SIZE;
240 	rmem->pg_arr = (void **)cpr->desc_ring;
241 	rmem->dma_arr = cpr->desc_mapping;
242 	rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
243 	rc = bnge_alloc_ring(bd, rmem);
244 	if (rc)
245 		goto err_free_cp_desc_arr;
246 	return rc;
247 
248 err_free_cp_desc_arr:
249 	bnge_free_cp_desc_arr(cpr);
250 	return rc;
251 }
252 
/* Build the NQ tree: one NQ ring per MSI-X vector, each owning one RX
 * completion ring (if it services RX) and one TX completion ring per TC
 * (if it services TX), and wire the CP rings back to bn->rx_ring /
 * bn->tx_ring.
 */
static int bnge_alloc_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, ulp_msix, rc;
	int tcs = 1;	/* single traffic class for now */

	ulp_msix = bnge_aux_get_msix(bd);
	/* j counts NQs that service TX, used to index TX rings per TC */
	for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
		bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		int cp_count = 0, k;
		int rx = 0, tx = 0;

		nqr = &bnapi->nq_ring;
		nqr->bnapi = bnapi;
		ring = &nqr->ring_struct;

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_nq_tree;

		/* NQ vectors come after any vectors reserved for aux/ULP */
		ring->map_idx = ulp_msix + i;

		if (i < bd->rx_nr_rings) {
			cp_count++;
			rx = 1;
		}

		/* shared channels: first tx_nr_rings NQs also carry TX;
		 * otherwise TX NQs follow the RX NQs.
		 */
		if ((sh && i < bd->tx_nr_rings) ||
		    (!sh && i >= bd->rx_nr_rings)) {
			cp_count += tcs;
			tx = 1;
		}

		nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
					   GFP_KERNEL);
		if (!nqr->cp_ring_arr) {
			rc = -ENOMEM;
			goto err_free_nq_tree;
		}

		nqr->cp_ring_count = cp_count;

		for (k = 0; k < cp_count; k++) {
			cpr = &nqr->cp_ring_arr[k];
			rc = alloc_one_cp_ring(bn, cpr);
			if (rc)
				goto err_free_nq_tree;

			cpr->bnapi = bnapi;
			cpr->cp_idx = k;
			/* slot 0 is the RX CP ring when this NQ does RX;
			 * remaining slots are TX CP rings, one per TC.
			 */
			if (!k && rx) {
				bn->rx_ring[i].rx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
			} else {
				int n, tc = k - rx;

				n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
				bn->tx_ring[n].tx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
			}
		}
		if (tx)
			j++;
	}
	return 0;

err_free_nq_tree:
	bnge_free_nq_tree(bn);
	return rc;
}
327 
328 static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
329 {
330 	return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
331 }
332 
333 static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
334 				       struct bnge_rx_ring_info *rxr)
335 {
336 	int i, max_idx;
337 
338 	if (!rxr->rx_buf_ring)
339 		return;
340 
341 	max_idx = bn->rx_nr_pages * RX_DESC_CNT;
342 
343 	for (i = 0; i < max_idx; i++) {
344 		struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
345 		void *data = rx_buf->data;
346 
347 		if (!data)
348 			continue;
349 
350 		rx_buf->data = NULL;
351 		page_pool_free_va(rxr->head_pool, data, true);
352 	}
353 }
354 
/* Return every posted aggregation buffer of one ring to its page pool
 * and clear the corresponding bits in the agg buffer bitmap.
 */
static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr)
{
	int i, max_idx;

	if (!rxr->rx_agg_buf_ring)
		return;

	max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;

	for (i = 0; i < max_idx; i++) {
		struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
		netmem_ref netmem = rx_agg_buf->netmem;

		if (!netmem)
			continue;

		rx_agg_buf->netmem = 0;
		__clear_bit(i, rxr->rx_agg_bmap);

		page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
	}
}
378 
379 static void bnge_free_one_tpa_info_data(struct bnge_net *bn,
380 					struct bnge_rx_ring_info *rxr)
381 {
382 	int i;
383 
384 	for (i = 0; i < bn->max_tpa; i++) {
385 		struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i];
386 		u8 *data = tpa_info->data;
387 
388 		if (!data)
389 			continue;
390 
391 		tpa_info->data = NULL;
392 		page_pool_free_va(rxr->head_pool, data, false);
393 	}
394 }
395 
/* Free all buffers of one RX/agg ring pair: TPA staging buffers first,
 * then the RX head buffers and aggregation buffers, and finally reset
 * the TPA aggregation-index bitmap.
 */
static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
					    struct bnge_rx_ring_info *rxr)
{
	struct bnge_tpa_idx_map *map;

	if (rxr->rx_tpa)
		bnge_free_one_tpa_info_data(bn, rxr);

	bnge_free_one_rx_ring_bufs(bn, rxr);
	bnge_free_one_agg_ring_bufs(bn, rxr);

	map = rxr->rx_tpa_idx_map;
	if (map)
		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
411 
412 static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
413 {
414 	struct bnge_dev *bd = bn->bd;
415 	int i;
416 
417 	if (!bn->rx_ring)
418 		return;
419 
420 	for (i = 0; i < bd->rx_nr_rings; i++)
421 		bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
422 }
423 
/* Unmap and free every in-flight TX skb on all TX rings, then reset the
 * corresponding BQL queues.  Walks the rings in the same layout the send
 * path uses: a head buffer, one reserved slot, then one slot per frag.
 */
static void bnge_free_tx_skbs(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	u16 max_idx;
	int i;

	max_idx = bn->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		int j;

		/* ring may not have been allocated if setup failed early */
		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < max_idx;) {
			struct bnge_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb;
			int k, last;

			skb = tx_buf->skb;
			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			/* head buffer maps the linear part of the skb */
			dma_unmap_single(bd->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			last = tx_buf->nr_frags;
			/* skip the head BD and its companion BD */
			j += 2;
			for (k = 0; k < last; k++, j++) {
				/* j can wrap past the ring end; mask it */
				int ring_idx = j & bn->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(bd->dev,
					       dma_unmap_addr(tx_buf, mapping),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bd->netdev, i));
	}
}
473 
/* Release all RX/agg/TPA buffers and all in-flight TX skbs. */
static void bnge_free_all_rings_bufs(struct bnge_net *bn)
{
	bnge_free_rx_ring_pair_bufs(bn);
	bnge_free_tx_skbs(bn);
}
479 
480 static void bnge_free_tpa_info(struct bnge_net *bn)
481 {
482 	struct bnge_dev *bd = bn->bd;
483 	int i, j;
484 
485 	for (i = 0; i < bd->rx_nr_rings; i++) {
486 		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
487 
488 		kfree(rxr->rx_tpa_idx_map);
489 		rxr->rx_tpa_idx_map = NULL;
490 		if (rxr->rx_tpa) {
491 			for (j = 0; j < bn->max_tpa; j++) {
492 				kfree(rxr->rx_tpa[j].agg_arr);
493 				rxr->rx_tpa[j].agg_arr = NULL;
494 			}
495 		}
496 		kfree(rxr->rx_tpa);
497 		rxr->rx_tpa = NULL;
498 	}
499 }
500 
/* Allocate TPA bookkeeping for every RX ring when the device supports
 * TPA v2: a per-ring array of bn->max_tpa TPA slots, an agg descriptor
 * array per slot, and the aggregation-index map.
 */
static int bnge_alloc_tpa_info(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j;

	if (!bd->max_tpa_v2)
		return 0;

	/* NOTE(review): max_t takes the LARGER of the device limit and
	 * MAX_TPA — confirm MAX_TPA is intended as a floor here rather
	 * than a cap on bd->max_tpa_v2.
	 */
	bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA);
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_tpa = kcalloc(bn->max_tpa, sizeof(struct bnge_tpa_info),
				      GFP_KERNEL);
		if (!rxr->rx_tpa)
			goto err_free_tpa_info;

		for (j = 0; j < bn->max_tpa; j++) {
			struct rx_agg_cmp *agg;

			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
			if (!agg)
				goto err_free_tpa_info;
			rxr->rx_tpa[j].agg_arr = agg;
		}
		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
					      GFP_KERNEL);
		if (!rxr->rx_tpa_idx_map)
			goto err_free_tpa_info;
	}
	return 0;

err_free_tpa_info:
	bnge_free_tpa_info(bn);
	return -ENOMEM;
}
537 
/* Tear down all RX rings: TPA state, page pools (data and head), the
 * aggregation bitmap, and the RX and aggregation ring memory.
 */
static void bnge_free_rx_rings(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	bnge_free_tpa_info(bn);
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_ring_struct *ring;

		/* head_pool may alias page_pool (refcounted via
		 * page_pool_get), so destroying both is safe either way.
		 */
		page_pool_destroy(rxr->page_pool);
		page_pool_destroy(rxr->head_pool);
		rxr->page_pool = rxr->head_pool = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnge_free_ring(bd, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnge_free_ring(bd, &ring->ring_mem);
	}
}
562 
/* Create the page pool(s) for one RX ring on the given NUMA node: the
 * main pool for packet/agg data and, when needed (unreadable netmem or
 * PAGE_SIZE > BNGE_RX_PAGE_SIZE), a separate readable pool for head
 * buffers; otherwise head_pool shares the main pool via an extra ref.
 */
static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr,
				   int numa_node)
{
	/* scale pool sizes down when multiple buffers fit in one page */
	const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
	struct page_pool_params pp = { 0 };
	struct bnge_dev *bd = bn->bd;
	struct page_pool *pool;

	pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
	pp.nid = numa_node;
	pp.netdev = bn->netdev;
	pp.dev = bd->dev;
	pp.dma_dir = bn->rx_dir;
	pp.max_len = PAGE_SIZE;
	/* main pool may hold device-memory (unreadable) netmem */
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
	pp.queue_idx = rxr->bnapi->index;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);
	rxr->page_pool = pool;

	rxr->need_head_pool = page_pool_is_unreadable(pool);
	if (bnge_separate_head_pool(rxr)) {
		/* head buffers must be CPU-readable: no unreadable netmem */
		pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pool = page_pool_create(&pp);
		if (IS_ERR(pool))
			goto err_destroy_pp;
	} else {
		/* share one pool; extra ref balances the double destroy */
		page_pool_get(pool);
	}
	rxr->head_pool = pool;
	return 0;

err_destroy_pp:
	page_pool_destroy(rxr->page_pool);
	rxr->page_pool = NULL;
	return PTR_ERR(pool);
}
606 
/* Allow both RX pools to recycle pages directly in this ring's NAPI
 * context (lockless fast path).
 */
static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
{
	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
}
612 
613 static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
614 				  struct bnge_rx_ring_info *rxr)
615 {
616 	u16 mem_size;
617 
618 	rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
619 	mem_size = rxr->rx_agg_bmap_size / 8;
620 	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
621 	if (!rxr->rx_agg_bmap)
622 		return -ENOMEM;
623 
624 	return 0;
625 }
626 
/* Allocate all RX rings: per-ring page pools (NUMA-local to the ring's
 * CPU), RX ring memory, and — when aggregation is required — the agg
 * ring memory and bitmap; finally the TPA state if TPA is enabled.
 */
static int bnge_alloc_rx_rings(struct bnge_net *bn)
{
	int i, rc = 0, agg_rings = 0, cpu;
	struct bnge_dev *bd = bn->bd;

	if (bnge_is_agg_reqd(bd))
		agg_rings = 1;

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_ring_struct *ring;
		int cpu_node;

		ring = &rxr->rx_ring_struct;

		/* spread rings across CPUs near the device's NUMA node */
		cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
		cpu_node = cpu_to_node(cpu);
		netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
			   i, cpu_node);
		rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
		if (rc)
			goto err_free_rx_rings;
		bnge_enable_rx_page_pool(rxr);

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_rx_rings;

		ring->grp_idx = i;
		if (agg_rings) {
			ring = &rxr->rx_agg_ring_struct;
			rc = bnge_alloc_ring(bd, &ring->ring_mem);
			if (rc)
				goto err_free_rx_rings;

			ring->grp_idx = i;
			rc = bnge_alloc_rx_agg_bmap(bn, rxr);
			if (rc)
				goto err_free_rx_rings;
		}
	}

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_alloc_tpa_info(bn);
		if (rc)
			goto err_free_rx_rings;
	}
	return rc;

err_free_rx_rings:
	bnge_free_rx_rings(bn);
	return rc;
}
680 
681 static void bnge_free_tx_rings(struct bnge_net *bn)
682 {
683 	struct bnge_dev *bd = bn->bd;
684 	int i;
685 
686 	for (i = 0; i < bd->tx_nr_rings; i++) {
687 		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
688 		struct bnge_ring_struct *ring;
689 
690 		ring = &txr->tx_ring_struct;
691 
692 		bnge_free_ring(bd, &ring->ring_mem);
693 	}
694 }
695 
/* Allocate ring memory for every TX ring and assign each ring the HW
 * queue id of its traffic class; j tracks the current TC as the loop
 * walks rings grouped tx_nr_rings_per_tc at a time.
 */
static int bnge_alloc_tx_rings(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, rc;

	for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_tx_rings;

		ring->grp_idx = txr->bnapi->index;
		qidx = bd->tc_to_qidx[j];
		ring->queue_id = bd->q_info[qidx].queue_id;
		/* advance TC after the last ring of the current TC */
		if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;

err_free_tx_rings:
	bnge_free_tx_rings(bn);
	return rc;
}
724 
725 static void bnge_free_vnic_attributes(struct bnge_net *bn)
726 {
727 	struct pci_dev *pdev = bn->bd->pdev;
728 	struct bnge_vnic_info *vnic;
729 	int i;
730 
731 	if (!bn->vnic_info)
732 		return;
733 
734 	for (i = 0; i < bn->nr_vnics; i++) {
735 		vnic = &bn->vnic_info[i];
736 
737 		kfree(vnic->uc_list);
738 		vnic->uc_list = NULL;
739 
740 		if (vnic->mc_list) {
741 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
742 					  vnic->mc_list, vnic->mc_list_mapping);
743 			vnic->mc_list = NULL;
744 		}
745 
746 		if (vnic->rss_table) {
747 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
748 					  vnic->rss_table,
749 					  vnic->rss_table_dma_addr);
750 			vnic->rss_table = NULL;
751 		}
752 
753 		vnic->rss_hash_key = NULL;
754 		vnic->flags = 0;
755 	}
756 }
757 
/* Allocate per-VNIC resources according to each VNIC's flags: a kmalloc
 * unicast list, a DMA-coherent multicast list, and a DMA-coherent RSS
 * indirection table with the hash key packed after it in the same
 * allocation.
 */
static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_vnic_info *vnic;
	int i, size;

	for (i = 0; i < bn->nr_vnics; i++) {
		vnic = &bn->vnic_info[i];

		if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
			/* slot 0 is the primary MAC, held elsewhere */
			int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;

			vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
			if (!vnic->uc_list)
				goto err_free_vnic_attributes;
		}

		if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(bd->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list)
				goto err_free_vnic_attributes;
		}

		/* Allocate rss table and hash key */
		size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);

		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
		vnic->rss_table = dma_alloc_coherent(bd->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table)
			goto err_free_vnic_attributes;

		/* hash key lives immediately after the aligned table */
		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

err_free_vnic_attributes:
	bnge_free_vnic_attributes(bn);
	return -ENOMEM;
}
806 
807 static int bnge_alloc_vnics(struct bnge_net *bn)
808 {
809 	int num_vnics;
810 
811 	/* Allocate only 1 VNIC for now
812 	 * Additional VNICs will be added based on RFS/NTUPLE in future patches
813 	 */
814 	num_vnics = 1;
815 
816 	bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
817 				GFP_KERNEL);
818 	if (!bn->vnic_info)
819 		return -ENOMEM;
820 
821 	bn->nr_vnics = num_vnics;
822 
823 	return 0;
824 }
825 
826 static void bnge_free_vnics(struct bnge_net *bn)
827 {
828 	kfree(bn->vnic_info);
829 	bn->vnic_info = NULL;
830 	bn->nr_vnics = 0;
831 }
832 
833 static void bnge_free_ring_grps(struct bnge_net *bn)
834 {
835 	kfree(bn->grp_info);
836 	bn->grp_info = NULL;
837 }
838 
839 static int bnge_init_ring_grps(struct bnge_net *bn)
840 {
841 	struct bnge_dev *bd = bn->bd;
842 	int i;
843 
844 	bn->grp_info = kcalloc(bd->nq_nr_rings,
845 			       sizeof(struct bnge_ring_grp_info),
846 			       GFP_KERNEL);
847 	if (!bn->grp_info)
848 		return -ENOMEM;
849 	for (i = 0; i < bd->nq_nr_rings; i++) {
850 		bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
851 		bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
852 		bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
853 		bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
854 		bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
855 	}
856 
857 	return 0;
858 }
859 
/* Tear down everything bnge_alloc_core() built, in reverse dependency
 * order; each helper tolerates partially-initialized state, so this is
 * also the error-unwind path for bnge_alloc_core().
 */
static void bnge_free_core(struct bnge_net *bn)
{
	bnge_free_vnic_attributes(bn);
	bnge_free_tx_rings(bn);
	bnge_free_rx_rings(bn);
	bnge_free_nq_tree(bn);
	bnge_free_nq_arrays(bn);
	bnge_free_ring_stats(bn);
	bnge_free_ring_grps(bn);
	bnge_free_vnics(bn);
	kfree(bn->tx_ring_map);
	bn->tx_ring_map = NULL;
	kfree(bn->tx_ring);
	bn->tx_ring = NULL;
	kfree(bn->rx_ring);
	bn->rx_ring = NULL;
	kfree(bn->bnapi);
	bn->bnapi = NULL;
}
879 
/* Allocate and wire up all core datapath structures: the bnapi array and
 * per-NQ contexts (in one combined allocation), the RX and TX ring info
 * arrays, ring stats, VNICs, NQ arrays, ring memory, the NQ tree, and
 * the default VNIC's attributes.  On any failure everything allocated so
 * far is released via bnge_free_core().
 */
static int bnge_alloc_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, size, arr_size;
	int rc = -ENOMEM;
	void *bnapi;

	/* single allocation: pointer array followed by the bnge_napi
	 * structs themselves, each cache-line aligned
	 */
	arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
			bd->nq_nr_rings);
	size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
	bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
	if (!bnapi)
		return rc;

	bn->bnapi = bnapi;
	bnapi += arr_size;
	for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
		struct bnge_nq_ring_info *nqr;

		bn->bnapi[i] = bnapi;
		bn->bnapi[i]->index = i;
		bn->bnapi[i]->bn = bn;
		nqr = &bn->bnapi[i]->nq_ring;
		nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
	}

	bn->rx_ring = kcalloc(bd->rx_nr_rings,
			      sizeof(struct bnge_rx_ring_info),
			      GFP_KERNEL);
	if (!bn->rx_ring)
		goto err_free_core;

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		rxr->rx_agg_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		/* RX ring i is serviced by NAPI context i */
		rxr->bnapi = bn->bnapi[i];
		bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
	}

	bn->tx_ring = kcalloc(bd->tx_nr_rings,
			      sizeof(struct bnge_tx_ring_info),
			      GFP_KERNEL);
	if (!bn->tx_ring)
		goto err_free_core;

	bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
				  GFP_KERNEL);
	if (!bn->tx_ring_map)
		goto err_free_core;

	/* shared channels: TX rings share the RX NAPI contexts starting
	 * at 0; otherwise TX NAPI contexts follow the RX ones
	 */
	if (bd->flags & BNGE_EN_SHARED_CHNL)
		j = 0;
	else
		j = bd->rx_nr_rings;

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_napi *bnapi2;
		int k;

		txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
		bn->tx_ring_map[i] = i;
		/* NAPI index: base + offset of this ring within its TC */
		k = j + BNGE_RING_TO_TC_OFF(bd, i);

		bnapi2 = bn->bnapi[k];
		txr->txq_index = i;
		txr->tx_napi_idx =
			BNGE_RING_TO_TC(bd, txr->txq_index);
		bnapi2->tx_ring[txr->tx_napi_idx] = txr;
		txr->bnapi = bnapi2;
	}

	rc = bnge_alloc_ring_stats(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_vnics(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_arrays(bn);
	if (rc)
		goto err_free_core;

	bnge_init_ring_struct(bn);

	rc = bnge_alloc_rx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_tx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_tree(bn);
	if (rc)
		goto err_free_core;

	bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
						  BNGE_VNIC_MCAST_FLAG |
						  BNGE_VNIC_UCAST_FLAG;
	rc = bnge_alloc_vnic_attributes(bn);
	if (rc)
		goto err_free_core;
	return 0;

err_free_core:
	bnge_free_core(bn);
	return rc;
}
994 
995 u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
996 {
997 	return rxr->rx_cpr->ring_struct.fw_ring_id;
998 }
999 
1000 u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
1001 {
1002 	return txr->tx_cpr->ring_struct.fw_ring_id;
1003 }
1004 
/* Write the NQ doorbell with the ARM type: acknowledge consumer index
 * @idx and re-arm the notification queue interrupt.
 */
static void bnge_db_nq_arm(struct bnge_net *bn,
			   struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_ARM |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1011 
/* Write the NQ doorbell with the MASK type: acknowledge consumer index
 * @idx without re-arming the interrupt.
 */
static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1017 
/* Write the completion-queue doorbell with the ARMALL type at consumer
 * index @idx.
 */
static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1023 
1024 static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
1025 {
1026 	struct bnge_napi *bnapi = bn->bnapi[n];
1027 	struct bnge_nq_ring_info *nqr;
1028 
1029 	nqr = &bnapi->nq_ring;
1030 
1031 	return nqr->ring_struct.map_idx;
1032 }
1033 
1034 static void bnge_init_nq_tree(struct bnge_net *bn)
1035 {
1036 	struct bnge_dev *bd = bn->bd;
1037 	int i, j;
1038 
1039 	for (i = 0; i < bd->nq_nr_rings; i++) {
1040 		struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
1041 		struct bnge_ring_struct *ring = &nqr->ring_struct;
1042 
1043 		ring->fw_ring_id = INVALID_HW_RING_ID;
1044 		for (j = 0; j < nqr->cp_ring_count; j++) {
1045 			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
1046 
1047 			ring = &cpr->ring_struct;
1048 			ring->fw_ring_id = INVALID_HW_RING_ID;
1049 		}
1050 	}
1051 }
1052 
/* Allocate one RX netmem buffer from the ring's page pool.  When a page
 * holds several BNGE_RX_PAGE_SIZE buffers, a fragment is carved out and
 * *offset points at it; otherwise a whole page is used at offset 0.
 * Returns 0 (null netmem_ref) on failure; on success *mapping holds the
 * buffer's DMA address.
 */
static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
					 dma_addr_t *mapping,
					 struct bnge_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	netmem_ref netmem;

	if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
						     BNGE_RX_PAGE_SIZE, gfp);
	} else {
		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
		*offset = 0;
	}
	if (!netmem)
		return 0;

	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
}
1074 
/* Allocate one RX head buffer of bn->rx_buf_size bytes from the head
 * pool.  Returns the buffer's kernel VA (or NULL on failure) and stores
 * its DMA address, offset by bn->rx_dma_offset, in *mapping.
 */
u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
			 struct bnge_rx_ring_info *rxr,
			 gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(rxr->head_pool, &offset,
				    bn->rx_buf_size, gfp);
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
	return page_address(page) + offset;
}
1090 
/* Allocate one RX head buffer and post it at producer index @prod:
 * record it in the software ring and write its DMA address into the
 * matching hardware descriptor.  Returns 0 or -ENOMEM.
 */
int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
	struct rx_bd *rxbd;
	dma_addr_t mapping;
	u8 *data;

	rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
	data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	/* data_ptr skips the headroom where the HW places the packet */
	rx_buf->data_ptr = data + bn->rx_offset;
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}
1112 
/* Fill one RX ring with up to rx_ring_size buffers.  A partial fill is
 * tolerated (logged as a warning); only a total failure to allocate the
 * first buffer aborts.
 */
static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr,
				       int ring_nr)
{
	u32 prod = rxr->rx_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_ring_size; i++) {
		rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX(prod);
	}

	/* Abort if not a single buffer can be allocated */
	if (rc && !i) {
		netdev_err(bn->netdev,
			   "RX ring %d: allocated %d/%d buffers, abort\n",
			   ring_nr, i, bn->rx_ring_size);
		return rc;
	}

	rxr->rx_prod = prod;

	if (i < bn->rx_ring_size)
		netdev_warn(bn->netdev,
			    "RX ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_ring_size);
	return 0;
}
1143 
1144 u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
1145 {
1146 	u16 next, max = rxr->rx_agg_bmap_size;
1147 
1148 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1149 	if (next >= max)
1150 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1151 	return next;
1152 }
1153 
/* Allocate one aggregation netmem buffer and post it at agg producer
 * index @prod.  The software slot is chosen via the agg bitmap (it can
 * diverge from @prod after partial completions) and its index is stored
 * in the descriptor's opaque field so completions can find it.
 */
int bnge_alloc_rx_netmem(struct bnge_net *bn,
			 struct bnge_rx_ring_info *rxr,
			 u16 prod, gfp_t gfp)
{
	struct bnge_sw_rx_agg_bd *rx_agg_buf;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;
	struct rx_bd *rxbd;
	dma_addr_t mapping;
	netmem_ref netmem;

	rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
	netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
	if (!netmem)
		return -ENOMEM;

	/* skip slots still occupied by in-flight buffers */
	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->netmem = netmem;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
1184 
/* Fill one aggregation ring with up to rx_agg_ring_size buffers.  A
 * partial fill is tolerated as long as at least MAX_SKB_FRAGS buffers
 * were posted (enough for one maximally-fragmented packet); otherwise
 * everything posted so far is freed and -ENOMEM returned.
 */
static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr,
					int ring_nr)
{
	u32 prod = rxr->rx_agg_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_agg_ring_size; i++) {
		rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX_AGG(prod);
	}

	if (rc && i < MAX_SKB_FRAGS) {
		netdev_err(bn->netdev,
			   "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
			   ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
		goto err_free_one_agg_ring_bufs;
	}

	rxr->rx_agg_prod = prod;

	if (i < bn->rx_agg_ring_size)
		netdev_warn(bn->netdev,
			    "Agg ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_agg_ring_size);
	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
	return -ENOMEM;
}
1218 
/* Allocate the backing data buffers for all TPA (aggregation) slots of
 * one RX ring.  On failure, previously allocated TPA buffers for this
 * ring are released and -ENOMEM is returned.
 */
static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr)
{
	dma_addr_t mapping;
	u8 *data;
	int i;

	for (i = 0; i < bn->max_tpa; i++) {
		data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
					    GFP_KERNEL);
		if (!data)
			goto err_free_tpa_info_data;

		rxr->rx_tpa[i].data = data;
		/* data_ptr skips the headroom reserved before packet data. */
		rxr->rx_tpa[i].data_ptr = data + bn->rx_offset;
		rxr->rx_tpa[i].mapping = mapping;
	}
	return 0;

err_free_tpa_info_data:
	bnge_free_one_tpa_info_data(bn, rxr);
	return -ENOMEM;
}
1242 
/* Populate buffers for one RX ring "pair": the normal RX ring, its
 * aggregation ring (when aggregation is required), and the TPA buffers
 * (when TPA is enabled for this ring).  On error, everything allocated
 * so far for this pair is unwound in reverse order.
 */
static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
{
	struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
	int rc;

	rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
	if (rc)
		return rc;

	if (bnge_is_agg_reqd(bn->bd)) {
		rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
		if (rc)
			goto err_free_one_rx_ring_bufs;
	}

	if (rxr->rx_tpa) {
		rc = bnge_alloc_one_tpa_info_data(bn, rxr);
		if (rc)
			goto err_free_one_agg_ring_bufs;
	}

	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
err_free_one_rx_ring_bufs:
	bnge_free_one_rx_ring_bufs(bn, rxr);
	return rc;
}
1272 
1273 static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
1274 {
1275 	struct rx_bd **rx_desc_ring;
1276 	u32 prod;
1277 	int i;
1278 
1279 	rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
1280 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
1281 		struct rx_bd *rxbd = rx_desc_ring[i];
1282 		int j;
1283 
1284 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1285 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1286 			rxbd->rx_bd_opaque = prod;
1287 		}
1288 	}
1289 }
1290 
/* Initialize the descriptors of one RX ring with the packet-BD
 * template (buffer size, type, EOP) and invalidate its firmware ring
 * ID until the ring is allocated in firmware.
 */
static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring;
	u32 type;

	type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	/* SOP is only set when the IP header alignment pad is in use. */
	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	ring = &rxr->rx_ring_struct;
	bnge_init_rxbd_pages(ring, type);
	ring->fw_ring_id = INVALID_HW_RING_ID;
}
1307 
/* Initialize the descriptors of one aggregation ring with the agg-BD
 * template.  The descriptor pages are only stamped when aggregation is
 * actually required for this device; the firmware ring ID is always
 * invalidated.
 */
static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring;
	u32 type;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;
	if (bnge_is_agg_reqd(bn->bd)) {
		type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

		bnge_init_rxbd_pages(ring, type);
	}
}
1323 
/* Initialize the descriptor templates for one RX/agg ring pair and
 * associate the RX queue with its NAPI instance.
 */
static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
{
	struct bnge_rx_ring_info *rxr;

	rxr = &bn->rx_ring[ring_nr];
	bnge_init_one_rx_ring_rxbd(bn, rxr);

	netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
			     &rxr->bnapi->napi);

	bnge_init_one_agg_ring_rxbd(bn, rxr);
}
1336 
/* Allocate buffers for all RX ring pairs.  On failure, buffers for
 * every ring pair (including partially filled ones) are released.
 */
static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
{
	int i, rc;

	for (i = 0; i < bn->bd->rx_nr_rings; i++) {
		rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
		if (rc)
			goto err_free_rx_ring_pair_bufs;
	}
	return 0;

err_free_rx_ring_pair_bufs:
	bnge_free_rx_ring_pair_bufs(bn);
	return rc;
}
1352 
/* Set the RX buffer offsets (skb headroom plus IP alignment for the
 * data pointer; headroom only for the DMA mapping) and initialize the
 * descriptor templates for every RX ring pair.
 */
static void bnge_init_rx_rings(struct bnge_net *bn)
{
	int i;

#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
	bn->rx_offset = BNGE_RX_OFFSET;
	bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;

	for (i = 0; i < bn->bd->rx_nr_rings; i++)
		bnge_init_one_rx_ring_pair(bn, i);
}
1365 
/* Initialize software state for all TX rings: set the queue wake
 * threshold, invalidate firmware ring IDs, and bind each TX queue to
 * its NAPI instance.
 */
static void bnge_init_tx_rings(struct bnge_net *bn)
{
	int i;

	/* Wake a stopped queue once half the ring (but at least the
	 * minimum descriptor count) is available again.
	 */
	bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);

	for (i = 0; i < bn->bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;

		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
				     &txr->bnapi->napi);
	}
}
1382 
/* Initialize software state for all VNICs: invalidate firmware IDs and
 * distribute the RSS hash key.
 *
 * The default VNIC owns the key: a random key is generated once (or a
 * user-updated key is taken over), and the 64-bit Toeplitz prefix is
 * derived from its first 8 bytes.  All other VNICs copy the default
 * VNIC's key.
 */
static void bnge_init_vnics(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	int i;

	for (i = 0; i < bn->nr_vnics; i++) {
		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->vnic_id = i;
		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		if (bn->vnic_info[i].rss_hash_key) {
			if (i == BNGE_VNIC_DEFAULT) {
				u8 *key = (void *)vnic->rss_hash_key;
				int k;

				/* First-time setup: generate a random key
				 * unless a valid or user-supplied key exists.
				 */
				if (!bn->rss_hash_key_valid &&
				    !bn->rss_hash_key_updated) {
					get_random_bytes(bn->rss_hash_key,
							 HW_HASH_KEY_SIZE);
					bn->rss_hash_key_updated = true;
				}

				memcpy(vnic->rss_hash_key, bn->rss_hash_key,
				       HW_HASH_KEY_SIZE);

				/* Key unchanged: the Toeplitz prefix below is
				 * still current, skip recomputing it.
				 */
				if (!bn->rss_hash_key_updated)
					continue;

				bn->rss_hash_key_updated = false;
				bn->rss_hash_key_valid = true;

				/* Fold the first 8 key bytes into a big-endian
				 * 64-bit prefix.
				 */
				bn->toeplitz_prefix = 0;
				for (k = 0; k < 8; k++) {
					bn->toeplitz_prefix <<= 8;
					bn->toeplitz_prefix |= key[k];
				}
			} else {
				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
				       HW_HASH_KEY_SIZE);
			}
		}
	}
}
1430 
1431 static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
1432 			     u32 ring_type)
1433 {
1434 	switch (ring_type) {
1435 	case HWRM_RING_ALLOC_TX:
1436 		db->db_ring_mask = bn->tx_ring_mask;
1437 		break;
1438 	case HWRM_RING_ALLOC_RX:
1439 		db->db_ring_mask = bn->rx_ring_mask;
1440 		break;
1441 	case HWRM_RING_ALLOC_AGG:
1442 		db->db_ring_mask = bn->rx_agg_ring_mask;
1443 		break;
1444 	case HWRM_RING_ALLOC_CMPL:
1445 	case HWRM_RING_ALLOC_NQ:
1446 		db->db_ring_mask = bn->cp_ring_mask;
1447 		break;
1448 	}
1449 	db->db_epoch_mask = db->db_ring_mask + 1;
1450 	db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
1451 }
1452 
/* Build the 64-bit doorbell key (path, doorbell type, ring XID, valid
 * bit) for a newly allocated ring, point the doorbell at the device's
 * doorbell BAR, and set up the ring/epoch masks.
 */
static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
			u32 ring_type, u32 map_idx, u32 xid)
{
	struct bnge_dev *bd = bn->bd;

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
		break;
	case HWRM_RING_ALLOC_RX:
	case HWRM_RING_ALLOC_AGG:
		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
		break;
	case HWRM_RING_ALLOC_CMPL:
		db->db_key64 = DBR_PATH_L2;
		break;
	case HWRM_RING_ALLOC_NQ:
		db->db_key64 = DBR_PATH_L2;
		break;
	}
	/* Encode the firmware ring ID and mark the doorbell valid. */
	db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;

	db->doorbell = bd->bar1 + bd->db_offset;
	bnge_set_db_mask(bn, db, ring_type);
}
1478 
/* Allocate one completion ring in firmware, set up its doorbell, and
 * ring it once to publish the current consumer index.  Returns 0 or a
 * negative errno from the HWRM request.
 */
static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
				   struct bnge_cp_ring_info *cpr)
{
	const u32 type = HWRM_RING_ALLOC_CMPL;
	struct bnge_napi *bnapi = cpr->bnapi;
	struct bnge_ring_struct *ring;
	u32 map_idx = bnapi->index;
	int rc;

	ring = &cpr->ring_struct;
	/* Handle lets NQ events be demultiplexed back to this CP ring. */
	ring->handle = BNGE_SET_NQ_HDL(cpr);
	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
	bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);

	return 0;
}
1499 
/* Allocate one TX ring in firmware and set up its doorbell.  Returns 0
 * or a negative errno from the HWRM request.
 */
static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
				   struct bnge_tx_ring_info *txr, u32 tx_idx)
{
	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
	const u32 type = HWRM_RING_ALLOC_TX;
	int rc;

	rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);

	return 0;
}
1515 
/* Allocate one aggregation ring in firmware and publish its buffers.
 *
 * Agg rings are mapped after all RX rings, hence the rx_nr_rings
 * offset on map_idx.  Both the agg and the RX producer doorbells are
 * written here: posting of RX buffers is deferred until the agg ring
 * exists (see bnge_hwrm_ring_alloc()).
 */
static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
	u32 type = HWRM_RING_ALLOC_AGG;
	struct bnge_dev *bd = bn->bd;
	u32 grp_idx = ring->grp_idx;
	u32 map_idx;
	int rc;

	map_idx = grp_idx + bd->rx_nr_rings;
	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
		    ring->fw_ring_id);
	bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
	bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
	bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;

	return 0;
}
1539 
/* Allocate one RX ring in firmware, set up its doorbell, and record
 * the firmware ring ID in the ring group.  The RX producer doorbell is
 * not written here; that happens once agg buffers (if any) are posted.
 */
static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
	struct bnge_napi *bnapi = rxr->bnapi;
	u32 type = HWRM_RING_ALLOC_RX;
	u32 map_idx = bnapi->index;
	int rc;

	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
	bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;

	return 0;
}
1558 
/* Allocate all firmware rings for the interface, in dependency order:
 * NQs first (with their IRQ masked while the doorbell is not yet
 * armed), then per-TX completion + TX rings, then RX rings with their
 * completion rings, and finally aggregation rings.  Rings allocated
 * before a failure are left for the caller's teardown path to free.
 */
static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	bool agg_rings;
	int i, rc = 0;

	agg_rings = !!(bnge_is_agg_reqd(bd));
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
		struct bnge_ring_struct *ring = &nqr->ring_struct;
		u32 type = HWRM_RING_ALLOC_NQ;
		u32 map_idx = ring->map_idx;
		unsigned int vector;

		/* Keep the IRQ quiet until the NQ doorbell is set up. */
		vector = bd->irq_tbl[map_idx].vector;
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc) {
			enable_irq(vector);
			goto err_out;
		}
		bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
		bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
		enable_irq(vector);
		bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;

		/* The first NQ also receives firmware async events. */
		if (!i) {
			rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
			if (rc)
				netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
		}
	}

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];

		/* Completion ring must exist before its TX ring. */
		rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
		if (rc)
			goto err_out;
		rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
		if (rc)
			goto err_out;
	}

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		struct bnge_napi *bnapi;
		u32 map_idx, type;

		rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
		if (rc)
			goto err_out;
		/* If we have agg rings, post agg buffers first. */
		if (!agg_rings)
			bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);

		cpr = rxr->rx_cpr;
		bnapi = rxr->bnapi;
		type = HWRM_RING_ALLOC_CMPL;
		map_idx = bnapi->index;

		ring = &cpr->ring_struct;
		ring->handle = BNGE_SET_NQ_HDL(cpr);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnge_set_db(bn, &cpr->cp_db, type, map_idx,
			    ring->fw_ring_id);
		bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
	}

	if (agg_rings) {
		for (i = 0; i < bd->rx_nr_rings; i++) {
			rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
			if (rc)
				goto err_out;
		}
	}
err_out:
	return rc;
}
1643 
1644 void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
1645 {
1646 	__le16 *ring_tbl = vnic->rss_table;
1647 	struct bnge_rx_ring_info *rxr;
1648 	struct bnge_dev *bd = bn->bd;
1649 	u16 tbl_size, i;
1650 
1651 	tbl_size = bnge_get_rxfh_indir_size(bd);
1652 
1653 	for (i = 0; i < tbl_size; i++) {
1654 		u16 ring_id, j;
1655 
1656 		j = bd->rss_indir_tbl[i];
1657 		rxr = &bn->rx_ring[j];
1658 
1659 		ring_id = rxr->rx_ring_struct.fw_ring_id;
1660 		*ring_tbl++ = cpu_to_le16(ring_id);
1661 		ring_id = bnge_cp_ring_for_rx(rxr);
1662 		*ring_tbl++ = cpu_to_le16(ring_id);
1663 	}
1664 }
1665 
/* Program a VNIC's RSS settings in firmware, then apply the overall
 * VNIC configuration.  Errors are logged and returned to the caller.
 */
static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
				  struct bnge_vnic_info *vnic)
{
	int rc;

	rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
	if (rc) {
		netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic->vnic_id, rc);
		return rc;
	}
	rc = bnge_hwrm_vnic_cfg(bn, vnic);
	if (rc)
		netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
			   vnic->vnic_id, rc);
	return rc;
}
1683 
/* Fully configure one VNIC: allocate the required number of RSS
 * contexts, program RSS and VNIC config, and, when aggregation is in
 * use, enable header-data split.  An HDS failure is logged but does
 * not fail the setup beyond its returned rc.
 */
static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	struct bnge_dev *bd = bn->bd;
	int rc, i, nr_ctxs;

	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
				   vnic->vnic_id, i, rc);
			return -ENOMEM;
		}
		/* Track allocations so teardown can free exactly these. */
		bn->rsscos_nr_ctxs++;
	}

	rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
	if (rc)
		return rc;

	if (bnge_is_agg_reqd(bd)) {
		rc = bnge_hwrm_vnic_set_hds(bn, vnic);
		if (rc)
			netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
				   vnic->vnic_id, rc);
	}
	return rc;
}
1712 
/* Drop one reference on an L2 filter; when the last reference goes
 * away, unlink it from the hash table and free it after an RCU grace
 * period (lookups may still be traversing the list).
 */
static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
{
	if (!refcount_dec_and_test(&fltr->refcnt))
		return;
	hlist_del_rcu(&fltr->base.hash);
	kfree_rcu(fltr, base.rcu);
}
1720 
/* Initialize a freshly allocated L2 filter with its MAC/VLAN key and
 * publish it into the hash bucket @idx with an initial reference held
 * by the caller.
 */
static void bnge_init_l2_filter(struct bnge_net *bn,
				struct bnge_l2_filter *fltr,
				struct bnge_l2_key *key, u32 idx)
{
	struct hlist_head *head;

	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
	fltr->l2_key.vlan = key->vlan;
	fltr->base.type = BNGE_FLTR_TYPE_L2;

	head = &bn->l2_fltr_hash_tbl[idx];
	hlist_add_head_rcu(&fltr->base.hash, head);
	refcount_set(&fltr->refcnt, 1);
}
1735 
/* RCU-protected lookup of an L2 filter by MAC/VLAN key in hash bucket
 * @idx.  Caller must hold rcu_read_lock() and take its own reference
 * on the returned filter.  Returns NULL when no match exists.
 */
static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
						      struct bnge_l2_key *key,
						      u32 idx)
{
	struct bnge_l2_filter *fltr;
	struct hlist_head *head;

	head = &bn->l2_fltr_hash_tbl[idx];
	hlist_for_each_entry_rcu(fltr, head, base.hash) {
		struct bnge_l2_key *l2_key = &fltr->l2_key;

		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
		    l2_key->vlan == key->vlan)
			return fltr;
	}
	return NULL;
}
1753 
/* Look up an L2 filter and, if found, take a reference on it inside
 * the RCU read section so it cannot be freed under the caller.  The
 * caller owns the returned reference.
 */
static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
						    struct bnge_l2_key *key,
						    u32 idx)
{
	struct bnge_l2_filter *fltr;

	rcu_read_lock();
	fltr = __bnge_lookup_l2_filter(bn, key, idx);
	if (fltr)
		refcount_inc(&fltr->refcnt);
	rcu_read_unlock();
	return fltr;
}
1767 
/* Find an existing L2 filter for @key or allocate and publish a new
 * one.  Returns a referenced filter on success or ERR_PTR(-ENOMEM).
 */
static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
						   struct bnge_l2_key *key,
						   gfp_t gfp)
{
	struct bnge_l2_filter *fltr;
	u32 idx;

	/* Hash the whole key into one of the filter buckets. */
	idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
	      BNGE_L2_FLTR_HASH_MASK;
	fltr = bnge_lookup_l2_filter(bn, key, idx);
	if (fltr)
		return fltr;

	fltr = kzalloc(sizeof(*fltr), gfp);
	if (!fltr)
		return ERR_PTR(-ENOMEM);

	bnge_init_l2_filter(bn, fltr, key, idx);
	return fltr;
}
1788 
/* Install a unicast MAC filter for @vnic_id at filter slot @idx, both
 * in software (filter table) and in firmware.  On firmware failure the
 * software filter reference is dropped again.
 */
static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
				     const u8 *mac_addr)
{
	struct bnge_l2_filter *fltr;
	struct bnge_l2_key key;
	int rc;

	ether_addr_copy(key.dst_mac_addr, mac_addr);
	key.vlan = 0;
	fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
	rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
	if (rc)
		goto err_del_l2_filter;
	bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
	return rc;

err_del_l2_filter:
	bnge_del_l2_filter(bn, fltr);
	return rc;
}
1813 
/* Sync the netdev multicast list into the default VNIC's cached list.
 *
 * Returns true when the cached list changed (content or count).  If
 * there are more addresses than BNGE_MAX_MC_ADDRS, falls back to
 * ALL_MCAST mode via *rx_mask and reports no change.
 */
static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct netdev_hw_addr *ha;
	int mc_count = 0, off = 0;
	bool update = false;
	u8 *haddr;

	netdev_for_each_mc_addr(ha, dev) {
		/* Too many entries to filter individually: accept all MC. */
		if (mc_count >= BNGE_MAX_MC_ADDRS) {
			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			vnic->mc_list_count = 0;
			return false;
		}
		haddr = ha->addr;
		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
			update = true;
		}
		off += ETH_ALEN;
		mc_count++;
	}
	if (mc_count)
		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

	if (mc_count != vnic->mc_list_count) {
		vnic->mc_list_count = mc_count;
		update = true;
	}
	return update;
}
1846 
1847 static bool bnge_uc_list_updated(struct bnge_net *bn)
1848 {
1849 	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1850 	struct net_device *dev = bn->netdev;
1851 	struct netdev_hw_addr *ha;
1852 	int off = 0;
1853 
1854 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
1855 		return true;
1856 
1857 	netdev_for_each_uc_addr(ha, dev) {
1858 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
1859 			return true;
1860 
1861 		off += ETH_ALEN;
1862 	}
1863 	return false;
1864 }
1865 
/* Whether promiscuous mode may be enabled for this device.  No policy
 * restrictions are implemented yet, so it is always permitted.
 */
static bool bnge_promisc_ok(struct bnge_net *bn)
{
	return true;
}
1870 
/* Reconcile the default VNIC's filters and RX mask with the netdev
 * state.
 *
 * If the unicast list changed, all secondary UC filters are torn down
 * and re-created (slot 0, the primary MAC, is preserved).  Too many UC
 * addresses forces promiscuous mode.  An MC-filter programming failure
 * falls back to ALL_MCAST.  Address-list reads are done under the
 * netdev addr lock; firmware calls are made outside it.
 */
static int bnge_cfg_def_vnic(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;
	struct netdev_hw_addr *ha;
	int i, off = 0, rc;
	bool uc_update;

	netif_addr_lock_bh(dev);
	uc_update = bnge_uc_list_updated(bn);
	netif_addr_unlock_bh(dev);

	if (!uc_update)
		goto skip_uc;

	/* Drop all secondary UC filters; slot 0 is the primary MAC. */
	for (i = 1; i < vnic->uc_filter_count; i++) {
		struct bnge_l2_filter *fltr = vnic->l2_filters[i];

		bnge_hwrm_l2_filter_free(bd, fltr);
		bnge_del_l2_filter(bn, fltr);
	}

	vnic->uc_filter_count = 1;

	netif_addr_lock_bh(dev);
	if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
		/* More addresses than filter slots: go promiscuous. */
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
			off += ETH_ALEN;
			vnic->uc_filter_count++;
		}
	}
	netif_addr_unlock_bh(dev);

	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
		rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
		if (rc) {
			netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
			/* Keep only the filters that were installed. */
			vnic->uc_filter_count = i;
			return rc;
		}
	}

skip_uc:
	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
	    !bnge_promisc_ok(bn))
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
		netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
			    rc);
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
		rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	}
	if (rc)
		netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
			   rc);

	return rc;
}
1936 
/* Quiesce NQ interrupts by writing each NQ doorbell without the arm
 * bit (bnge_db_nq(), as opposed to bnge_db_nq_arm()).  Only rings that
 * exist in firmware are touched.
 */
static void bnge_disable_int(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (!bn->bnapi)
		return;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;

		nqr = &bnapi->nq_ring;
		ring = &nqr->ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
	}
}
1957 
/* Disarm NQ interrupts, then wait for any in-flight interrupt handler
 * on each vector to finish so no handler runs after return.
 */
static void bnge_disable_int_sync(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	bnge_disable_int(bn);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);

		synchronize_irq(bd->irq_tbl[map_idx].vector);
	}
}
1970 
/* Re-arm every NQ doorbell at its current consumer index so the device
 * resumes generating notifications.
 */
static void bnge_enable_int(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;

		nqr = &bnapi->nq_ring;
		bnge_db_nq_arm(bn, &nqr->nq_db, nqr->nq_raw_cons);
	}
}
1984 
/* Disable NAPI polling on all rings.  The state bit makes this
 * idempotent: a second call while already disabled is a no-op.
 * Caller holds the netdev instance lock (locked NAPI variant).
 */
static void bnge_disable_napi(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (test_and_set_bit(BNGE_STATE_NAPI_DISABLED, &bn->state))
		return;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];

		napi_disable_locked(&bnapi->napi);
	}
}
1999 
/* Re-enable NAPI polling on all rings, clearing per-ring reset/fault
 * flags first.  Caller holds the netdev instance lock (locked NAPI
 * variant).
 */
static void bnge_enable_napi(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	clear_bit(BNGE_STATE_NAPI_DISABLED, &bn->state);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];

		bnapi->in_reset = false;
		bnapi->tx_fault = 0;

		napi_enable_locked(&bnapi->napi);
	}
}
2015 
2016 static void bnge_hwrm_vnic_free(struct bnge_net *bn)
2017 {
2018 	int i;
2019 
2020 	for (i = 0; i < bn->nr_vnics; i++)
2021 		bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
2022 }
2023 
2024 static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
2025 {
2026 	int i, j;
2027 
2028 	for (i = 0; i < bn->nr_vnics; i++) {
2029 		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
2030 
2031 		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
2032 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
2033 				bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
2034 		}
2035 	}
2036 	bn->rsscos_nr_ctxs = 0;
2037 }
2038 
/* Remove all unicast filters of the default VNIC, in firmware and in
 * software (including slot 0, the primary MAC).
 */
static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	int i;

	for (i = 0; i < vnic->uc_filter_count; i++) {
		struct bnge_l2_filter *fltr = vnic->l2_filters[i];

		bnge_hwrm_l2_filter_free(bn->bd, fltr);
		bnge_del_l2_filter(bn, fltr);
	}

	vnic->uc_filter_count = 0;
}
2053 
/* Tear down all VNIC state in dependency order: filters first, then
 * the VNICs themselves, then their RSS contexts.
 */
static void bnge_clear_vnic(struct bnge_net *bn)
{
	bnge_hwrm_clear_vnic_filter(bn);
	bnge_hwrm_vnic_free(bn);
	bnge_hwrm_vnic_ctx_free(bn);
}
2060 
/* Free one RX ring in firmware and invalidate its IDs.
 *
 * @close_path: pass the companion completion ring so firmware can
 * flush pending completions during a normal close; otherwise no
 * completion ring is referenced.
 */
static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr,
				   bool close_path)
{
	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
	u32 grp_idx = rxr->bnapi->index;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
	hwrm_ring_free_send_msg(bn, ring,
				RING_FREE_REQ_RING_TYPE_RX,
				close_path ? cmpl_ring_id :
				INVALID_HW_RING_ID);
	ring->fw_ring_id = INVALID_HW_RING_ID;
	bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
}
2080 
/* Free one aggregation ring in firmware and invalidate its IDs.  See
 * bnge_hwrm_rx_ring_free() for the @close_path semantics.
 */
static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr,
				       bool close_path)
{
	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
	u32 grp_idx = rxr->bnapi->index;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
				close_path ? cmpl_ring_id :
				INVALID_HW_RING_ID);
	ring->fw_ring_id = INVALID_HW_RING_ID;
	bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
2099 
/* Free one TX ring in firmware and invalidate its ID.  See
 * bnge_hwrm_rx_ring_free() for the @close_path semantics.
 */
static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
				   struct bnge_tx_ring_info *txr,
				   bool close_path)
{
	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
		       INVALID_HW_RING_ID;
	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
				cmpl_ring_id);
	ring->fw_ring_id = INVALID_HW_RING_ID;
}
2116 
/* Free one completion ring in firmware and invalidate its ID.  A ring
 * that was never allocated (INVALID_HW_RING_ID) is skipped.
 */
static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
				   struct bnge_cp_ring_info *cpr)
{
	struct bnge_ring_struct *ring;

	ring = &cpr->ring_struct;
	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
				INVALID_HW_RING_ID);
	ring->fw_ring_id = INVALID_HW_RING_ID;
}
2130 
/* Free all firmware rings in reverse dependency order: TX, then
 * RX/agg, then (after quiescing IRQs) completion rings and NQs.
 *
 * @close_path: forwarded to the per-ring free helpers so firmware can
 * flush through the completion rings during a normal close.
 */
static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (!bn->bnapi)
		return;

	for (i = 0; i < bd->tx_nr_rings; i++)
		bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);

	for (i = 0; i < bd->rx_nr_rings; i++) {
		bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
		bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
	}

	/* The completion rings are about to be freed.  After that the
	 * IRQ doorbell will not work anymore.  So we need to disable
	 * IRQ here.
	 */
	bnge_disable_int_sync(bn);

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		nqr = &bnapi->nq_ring;
		/* Free the CP rings hanging off this NQ before the NQ. */
		for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
			bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);

		ring = &nqr->ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bn, ring,
						RING_FREE_REQ_RING_TYPE_NQ,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}
2173 
/* Name each MSI-X vector after the netdev and its role ("TxRx" when
 * rings are shared, otherwise "rx"/"tx" by ring index) and install the
 * common interrupt handler.
 */
static void bnge_setup_msix(struct bnge_net *bn)
{
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;
	int len, i;

	len = sizeof(bd->irq_tbl[0].name);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);
		char *attr;

		if (bd->flags & BNGE_EN_SHARED_CHNL)
			attr = "TxRx";
		else if (i < bd->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
			 attr, i);
		bd->irq_tbl[map_idx].handler = bnge_msix;
	}
}
2197 
/* Prepare MSI-X vector names/handlers and tell the stack how many real
 * TX/RX queues are in use.  Returns 0 or a negative errno.
 */
static int bnge_setup_interrupts(struct bnge_net *bn)
{
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;

	bnge_setup_msix(bn);

	return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
}
2207 
/* Release all firmware-side resources in order: VNIC state, rings,
 * then statistics contexts.
 */
static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
{
	bnge_clear_vnic(bn);
	bnge_hwrm_ring_free(bn, close_path);
	bnge_hwrm_stat_ctx_free(bn);
}
2214 
/* Release every requested IRQ: clear its affinity hint, free its
 * cpumask, and hand the vector back.  Safe to call for vectors that
 * were never requested.
 *
 * NOTE(review): irq_set_affinity_hint() is deprecated upstream in
 * favor of irq_update_affinity_hint() — consider migrating.
 */
static void bnge_free_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_irq *irq;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);

		irq = &bd->irq_tbl[map_idx];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			free_irq(irq->vector, bn->bnapi[i]);
		}

		irq->requested = 0;
	}
}
2237 
/* Request one IRQ per NQ ring, bind it to its NAPI instance, and
 * spread affinity hints across the device's NUMA node.  On any
 * failure, all IRQs acquired so far are released.
 */
static int bnge_request_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, rc;

	rc = bnge_setup_interrupts(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
		return rc;
	}
	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);
		struct bnge_irq *irq = &bd->irq_tbl[map_idx];

		/* The per-ring bnge_napi is the handler's dev_id cookie. */
		rc = request_irq(irq->vector, irq->handler, 0, irq->name,
				 bn->bnapi[i]);
		if (rc)
			goto err_free_irq;

		netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
		irq->requested = 1;

		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bd->pdev->dev);

			/* Spread vectors round-robin over the local node. */
			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bn->netdev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				goto err_free_irq;
			}
		}
	}
	return 0;

err_free_irq:
	bnge_free_irq(bn);
	return rc;
}
2281 
/* Enable or disable TPA (aggregation offload) on every VNIC.
 *
 * When disabling while firmware is unreachable, the request is skipped
 * entirely (nothing to tell the device).  Returns 0 or the first
 * per-VNIC error.
 */
static int bnge_set_tpa(struct bnge_net *bn, bool set_tpa)
{
	u32 tpa_flags = 0;
	int rc, i;

	if (set_tpa)
		tpa_flags = bn->priv_flags & BNGE_NET_EN_TPA;
	else if (BNGE_NO_FW_ACCESS(bn->bd))
		return 0;
	for (i = 0; i < bn->nr_vnics; i++) {
		rc = bnge_hwrm_vnic_set_tpa(bn->bd, &bn->vnic_info[i],
					    tpa_flags);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}
2302 
/* Program the chip via HWRM: stat contexts, rings, the default VNIC and
 * its RSS/TPA/MAC-filter/rx-mask configuration.
 *
 * The calls are order-dependent (stat contexts before rings, rings before
 * the VNIC).  Returns 0 on success; on any failure the firmware-side
 * resources acquired so far are released via bnge_hwrm_resource_free()
 * before the negative errno is returned.
 */
static int bnge_init_chip(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct bnge_dev *bd = bn->bd;
	int rc;

#define BNGE_DEF_STATS_COAL_TICKS	 1000000
	bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;

	rc = bnge_hwrm_stat_ctx_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_ring_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
	if (rc) {
		netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_setup_vnic(bn, vnic);
	if (rc)
		goto err_out;

	if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bnge_hwrm_update_rss_hash_cfg(bn);

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_set_tpa(bn, true);
		if (rc)
			goto err_out;
	}

	/* Filter for default vnic 0 */
	rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
	if (rc) {
		netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = 0;

	/* Translate the current netdev flags into the CFA RX mask. */
	if (bn->netdev->flags & IFF_BROADCAST)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if (bn->netdev->flags & IFF_PROMISC)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bn->netdev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else if (bn->netdev->flags & IFF_MULTICAST) {
		u32 mask = 0;

		bnge_mc_list_updated(bn, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnge_cfg_def_vnic(bn);
	if (rc)
		goto err_out;
	return 0;

err_out:
	bnge_hwrm_resource_free(bn, 0);
	return rc;
}
2378 
2379 static void bnge_init_napi(struct bnge_net *bn)
2380 {
2381 	struct bnge_dev *bd = bn->bd;
2382 	struct bnge_napi *bnapi;
2383 	int i;
2384 
2385 	for (i = 0; i < bd->nq_nr_rings; i++) {
2386 		bnapi = bn->bnapi[i];
2387 		netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
2388 					     bnge_napi_poll, bnapi->index);
2389 	}
2390 }
2391 
2392 static void bnge_del_napi(struct bnge_net *bn)
2393 {
2394 	struct bnge_dev *bd = bn->bd;
2395 	int i;
2396 
2397 	for (i = 0; i < bd->rx_nr_rings; i++)
2398 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
2399 	for (i = 0; i < bd->tx_nr_rings; i++)
2400 		netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
2401 
2402 	for (i = 0; i < bd->nq_nr_rings; i++) {
2403 		struct bnge_napi *bnapi = bn->bnapi[i];
2404 
2405 		__netif_napi_del_locked(&bnapi->napi);
2406 	}
2407 
2408 	/* Wait for RCU grace period after removing NAPI instances */
2409 	synchronize_net();
2410 }
2411 
/* Initialize the NQ/RX/TX rings, ring groups and VNICs, then program the
 * chip through HWRM.
 *
 * Returns 0 on success or a negative errno.  On failure everything
 * allocated by this function is released again: the cleanup labels
 * cascade, so a bnge_init_chip() failure also frees the RX ring pair
 * buffers (the original non-cascading labels leaked them on that path).
 */
static int bnge_init_nic(struct bnge_net *bn)
{
	int rc;

	bnge_init_nq_tree(bn);

	bnge_init_rx_rings(bn);
	rc = bnge_alloc_rx_ring_pair_bufs(bn);
	if (rc)
		return rc;

	bnge_init_tx_rings(bn);

	rc = bnge_init_ring_grps(bn);
	if (rc)
		goto err_free_rx_ring_pair_bufs;

	bnge_init_vnics(bn);

	rc = bnge_init_chip(bn);
	if (rc)
		goto err_free_ring_grps;

	return 0;

err_free_ring_grps:
	bnge_free_ring_grps(bn);
err_free_rx_ring_pair_bufs:
	bnge_free_rx_ring_pair_bufs(bn);
	return rc;
}
2444 
2445 static void bnge_tx_disable(struct bnge_net *bn)
2446 {
2447 	struct bnge_tx_ring_info *txr;
2448 	int i;
2449 
2450 	if (bn->tx_ring) {
2451 		for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2452 			txr = &bn->tx_ring[i];
2453 			WRITE_ONCE(txr->dev_state, BNGE_DEV_STATE_CLOSING);
2454 		}
2455 	}
2456 	/* Make sure napi polls see @dev_state change */
2457 	synchronize_net();
2458 
2459 	if (!bn->netdev)
2460 		return;
2461 	/* Drop carrier first to prevent TX timeout */
2462 	netif_carrier_off(bn->netdev);
2463 	/* Stop all TX queues */
2464 	netif_tx_disable(bn->netdev);
2465 }
2466 
2467 static void bnge_tx_enable(struct bnge_net *bn)
2468 {
2469 	struct bnge_tx_ring_info *txr;
2470 	int i;
2471 
2472 	for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2473 		txr = &bn->tx_ring[i];
2474 		WRITE_ONCE(txr->dev_state, 0);
2475 	}
2476 	/* Make sure napi polls see @dev_state change */
2477 	synchronize_net();
2478 	netif_tx_wake_all_queues(bn->netdev);
2479 }
2480 
/* Core bring-up path used by ndo_open.
 *
 * Ordering matters: rings are reserved and core memory allocated first,
 * NAPI instances are registered before IRQs are requested (the handlers
 * use them as cookies), the chip is programmed, and only then are NAPI,
 * interrupts and TX enabled.  Returns 0 or a negative errno; on failure
 * everything acquired here is unwound in reverse order.
 */
static int bnge_open_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int rc;

	netif_carrier_off(bn->netdev);

	rc = bnge_reserve_rings(bd);
	if (rc) {
		netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
		return rc;
	}

	rc = bnge_alloc_core(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
		return rc;
	}

	bnge_init_napi(bn);
	rc = bnge_request_irq(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
		goto err_del_napi;
	}

	rc = bnge_init_nic(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
		goto err_free_irq;
	}

	bnge_enable_napi(bn);

	/* Advertise the open state before interrupts start firing. */
	set_bit(BNGE_STATE_OPEN, &bd->state);

	bnge_enable_int(bn);

	bnge_tx_enable(bn);
	return 0;

err_free_irq:
	bnge_free_irq(bn);
err_del_napi:
	bnge_del_napi(bn);
	bnge_free_core(bn);
	return rc;
}
2529 
/* ndo_open handler: bring the interface fully up. */
static int bnge_open(struct net_device *dev)
{
	int rc;

	rc = bnge_open_core(netdev_priv(dev));
	if (rc)
		netdev_err(dev, "bnge_open_core err: %d\n", rc);

	return rc;
}
2541 
/* Release the firmware-side resources acquired during bring-up.
 * NOTE(review): the second argument selects a teardown variant of
 * bnge_hwrm_resource_free() (contrast with the error path in
 * bnge_init_chip(), which passes 0) — confirm against the callee.
 * Always returns 0.
 */
static int bnge_shutdown_nic(struct bnge_net *bn)
{
	bnge_hwrm_resource_free(bn, 1);
	return 0;
}
2547 
/* Core tear-down path used by ndo_stop.
 *
 * Mirrors bnge_open_core() in reverse: TX is quiesced first, the OPEN
 * state bit is cleared, firmware resources are released, then NAPI is
 * disabled and ring buffers, IRQs, NAPI instances and core memory are
 * freed.
 */
static void bnge_close_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;

	bnge_tx_disable(bn);

	clear_bit(BNGE_STATE_OPEN, &bd->state);
	bnge_shutdown_nic(bn);
	bnge_disable_napi(bn);
	bnge_free_all_rings_bufs(bn);
	bnge_free_irq(bn);
	bnge_del_napi(bn);

	bnge_free_core(bn);
}
2563 
/* ndo_stop handler: tear the interface down.  Never fails. */
static int bnge_close(struct net_device *dev)
{
	bnge_close_core(netdev_priv(dev));

	return 0;
}
2572 
/* netdev callbacks implemented by this driver. */
static const struct net_device_ops bnge_netdev_ops = {
	.ndo_open		= bnge_open,
	.ndo_stop		= bnge_close,
	.ndo_start_xmit		= bnge_start_xmit,
	.ndo_features_check	= bnge_features_check,
};
2579 
/* Install the PF MAC address as the netdev's hardware address. */
static void bnge_init_mac_addr(struct bnge_dev *bd)
{
	eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
}
2584 
2585 static void bnge_set_tpa_flags(struct bnge_dev *bd)
2586 {
2587 	struct bnge_net *bn = netdev_priv(bd->netdev);
2588 
2589 	bn->priv_flags &= ~BNGE_NET_EN_TPA;
2590 
2591 	if (bd->netdev->features & NETIF_F_LRO)
2592 		bn->priv_flags |= BNGE_NET_EN_LRO;
2593 	else if (bd->netdev->features & NETIF_F_GRO_HW)
2594 		bn->priv_flags |= BNGE_NET_EN_GRO;
2595 }
2596 
2597 static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
2598 {
2599 	int i;
2600 
2601 	for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
2602 		INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
2603 	get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
2604 }
2605 
/* Compute RX/TX/aggregation/completion ring geometry from the current
 * MTU, ring sizes and TPA/jumbo settings.
 *
 * Derives buffer sizes (rx_buf_use_size/rx_buf_size), page counts and
 * ring masks, shrinking rings that exceed hardware maxima with a
 * warning.  Called at init and whenever the parameters change.
 */
void bnge_set_ring_params(struct bnge_dev *bd)
{
	struct bnge_net *bn = netdev_priv(bd->netdev);
	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ring_size = bn->rx_ring_size;
	bn->rx_agg_ring_size = 0;
	bn->rx_agg_nr_pages = 0;

	/* TPA needs aggregation buffers: up to 4 pages worth per packet. */
	if (bn->priv_flags & BNGE_NET_EN_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);

	bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
	if (rx_space > PAGE_SIZE) {
		u32 jumbo_factor;

		/* Frame no longer fits one page: switch to jumbo mode and
		 * size the agg ring to hold the MTU's worth of pages.
		 */
		bn->priv_flags |= BNGE_NET_EN_JUMBO;
		jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	if (agg_factor) {
		if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
			ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
			netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
				    bn->rx_ring_size, ring_size);
			bn->rx_ring_size = ring_size;
		}
		agg_ring_size = ring_size * agg_factor;

		bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
							  RX_DESC_CNT);
		if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bn->rx_agg_ring_size = agg_ring_size;
		bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;

		/* With an agg ring, the header buffer only needs to cover
		 * copybreak / header-data-split thresholds.
		 */
		rx_size = max3(BNGE_DEFAULT_RX_COPYBREAK,
			       bn->rx_copybreak,
			       bn->netdev->cfg_pending->hds_thresh);
		rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bn->rx_buf_use_size = rx_size;
	bn->rx_buf_size = rx_space;

	bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
	bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bn->tx_ring_size;
	bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
	bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;

	max_rx_cmpl = bn->rx_ring_size;

	if (bn->priv_flags & BNGE_NET_EN_TPA)
		max_rx_cmpl += bd->max_tpa_v2;
	/* Completion ring must absorb RX (x2), agg and TX completions. */
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
	bn->cp_ring_size = ring_size;

	bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
	if (bn->cp_nr_pages > MAX_CP_PAGES) {
		bn->cp_nr_pages = MAX_CP_PAGES;
		bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
			    ring_size, bn->cp_ring_size);
	}
	bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
	bn->cp_ring_mask = bn->cp_bit - 1;
}
2691 
2692 static void bnge_init_ring_params(struct bnge_net *bn)
2693 {
2694 	u32 rx_size;
2695 
2696 	bn->rx_copybreak = BNGE_DEFAULT_RX_COPYBREAK;
2697 	/* Try to fit 4 chunks into a 4k page */
2698 	rx_size = SZ_1K -
2699 		NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2700 	bn->netdev->cfg->hds_thresh = max(BNGE_DEFAULT_RX_COPYBREAK, rx_size);
2701 }
2702 
2703 int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
2704 {
2705 	struct net_device *netdev;
2706 	struct bnge_net *bn;
2707 	int rc;
2708 
2709 	netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
2710 				    max_irqs);
2711 	if (!netdev)
2712 		return -ENOMEM;
2713 
2714 	SET_NETDEV_DEV(netdev, bd->dev);
2715 	bd->netdev = netdev;
2716 
2717 	netdev->netdev_ops = &bnge_netdev_ops;
2718 
2719 	bnge_set_ethtool_ops(netdev);
2720 
2721 	bn = netdev_priv(netdev);
2722 	bn->netdev = netdev;
2723 	bn->bd = bd;
2724 
2725 	netdev->min_mtu = ETH_ZLEN;
2726 	netdev->max_mtu = bd->max_mtu;
2727 
2728 	netdev->hw_features = NETIF_F_IP_CSUM |
2729 			      NETIF_F_IPV6_CSUM |
2730 			      NETIF_F_SG |
2731 			      NETIF_F_TSO |
2732 			      NETIF_F_TSO6 |
2733 			      NETIF_F_GSO_UDP_TUNNEL |
2734 			      NETIF_F_GSO_GRE |
2735 			      NETIF_F_GSO_IPXIP4 |
2736 			      NETIF_F_GSO_UDP_TUNNEL_CSUM |
2737 			      NETIF_F_GSO_GRE_CSUM |
2738 			      NETIF_F_GSO_PARTIAL |
2739 			      NETIF_F_RXHASH |
2740 			      NETIF_F_RXCSUM |
2741 			      NETIF_F_GRO;
2742 
2743 	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
2744 		netdev->hw_features |= NETIF_F_GSO_UDP_L4;
2745 
2746 	if (BNGE_SUPPORTS_TPA(bd))
2747 		netdev->hw_features |= NETIF_F_LRO;
2748 
2749 	netdev->hw_enc_features = NETIF_F_IP_CSUM |
2750 				  NETIF_F_IPV6_CSUM |
2751 				  NETIF_F_SG |
2752 				  NETIF_F_TSO |
2753 				  NETIF_F_TSO6 |
2754 				  NETIF_F_GSO_UDP_TUNNEL |
2755 				  NETIF_F_GSO_GRE |
2756 				  NETIF_F_GSO_UDP_TUNNEL_CSUM |
2757 				  NETIF_F_GSO_GRE_CSUM |
2758 				  NETIF_F_GSO_IPXIP4 |
2759 				  NETIF_F_GSO_PARTIAL;
2760 
2761 	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
2762 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
2763 
2764 	netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
2765 				       NETIF_F_GSO_GRE_CSUM;
2766 
2767 	netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
2768 	if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
2769 		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
2770 	if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
2771 		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;
2772 
2773 	if (BNGE_SUPPORTS_TPA(bd))
2774 		netdev->hw_features |= NETIF_F_GRO_HW;
2775 
2776 	netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
2777 
2778 	if (netdev->features & NETIF_F_GRO_HW)
2779 		netdev->features &= ~NETIF_F_LRO;
2780 
2781 	netdev->priv_flags |= IFF_UNICAST_FLT;
2782 
2783 	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
2784 	if (bd->tso_max_segs)
2785 		netif_set_tso_max_segs(netdev, bd->tso_max_segs);
2786 
2787 	bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
2788 	bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
2789 	bn->rx_dir = DMA_FROM_DEVICE;
2790 
2791 	bnge_set_tpa_flags(bd);
2792 	bnge_init_ring_params(bn);
2793 	bnge_set_ring_params(bd);
2794 
2795 	bnge_init_l2_fltr_tbl(bn);
2796 	bnge_init_mac_addr(bd);
2797 
2798 	netdev->request_ops_lock = true;
2799 	rc = register_netdev(netdev);
2800 	if (rc) {
2801 		dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
2802 		goto err_netdev;
2803 	}
2804 
2805 	return 0;
2806 
2807 err_netdev:
2808 	free_netdev(netdev);
2809 	return rc;
2810 }
2811 
/* Unregister and free the net_device, clearing bd->netdev so the
 * teardown is idempotent from the caller's perspective.
 */
void bnge_netdev_free(struct bnge_dev *bd)
{
	struct net_device *netdev = bd->netdev;

	unregister_netdev(netdev);
	free_netdev(netdev);
	bd->netdev = NULL;
}
2820