1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2025 Broadcom.
3
4 #include <asm/byteorder.h>
5 #include <linux/dma-mapping.h>
6 #include <linux/dmapool.h>
7 #include <linux/delay.h>
8 #include <linux/errno.h>
9 #include <linux/kernel.h>
10 #include <linux/list.h>
11 #include <linux/pci.h>
12 #include <linux/netdevice.h>
13 #include <net/netdev_lock.h>
14 #include <net/netdev_queues.h>
15 #include <net/netdev_rx_queue.h>
16 #include <linux/etherdevice.h>
17 #include <linux/if.h>
18 #include <net/ip.h>
19 #include <linux/skbuff.h>
20 #include <net/page_pool/helpers.h>
21
22 #include "bnge.h"
23 #include "bnge_hwrm_lib.h"
24 #include "bnge_ethtool.h"
25 #include "bnge_rmem.h"
26 #include "bnge_txrx.h"
27
28 #define BNGE_RING_TO_TC_OFF(bd, tx) \
29 ((tx) % (bd)->tx_nr_rings_per_tc)
30
31 #define BNGE_RING_TO_TC(bd, tx) \
32 ((tx) / (bd)->tx_nr_rings_per_tc)
33
34 #define BNGE_TC_TO_RING_BASE(bd, tc) \
35 ((tc) * (bd)->tx_nr_rings_per_tc)
36
bnge_free_stats_mem(struct bnge_net * bn,struct bnge_stats_mem * stats)37 static void bnge_free_stats_mem(struct bnge_net *bn,
38 struct bnge_stats_mem *stats)
39 {
40 struct bnge_dev *bd = bn->bd;
41
42 if (stats->hw_stats) {
43 dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
44 stats->hw_stats_map);
45 stats->hw_stats = NULL;
46 }
47 }
48
bnge_alloc_stats_mem(struct bnge_net * bn,struct bnge_stats_mem * stats)49 static int bnge_alloc_stats_mem(struct bnge_net *bn,
50 struct bnge_stats_mem *stats)
51 {
52 struct bnge_dev *bd = bn->bd;
53
54 stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
55 &stats->hw_stats_map, GFP_KERNEL);
56 if (!stats->hw_stats)
57 return -ENOMEM;
58
59 return 0;
60 }
61
bnge_free_ring_stats(struct bnge_net * bn)62 static void bnge_free_ring_stats(struct bnge_net *bn)
63 {
64 struct bnge_dev *bd = bn->bd;
65 int i;
66
67 if (!bn->bnapi)
68 return;
69
70 for (i = 0; i < bd->nq_nr_rings; i++) {
71 struct bnge_napi *bnapi = bn->bnapi[i];
72 struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
73
74 bnge_free_stats_mem(bn, &nqr->stats);
75 }
76 }
77
bnge_alloc_ring_stats(struct bnge_net * bn)78 static int bnge_alloc_ring_stats(struct bnge_net *bn)
79 {
80 struct bnge_dev *bd = bn->bd;
81 u32 size, i;
82 int rc;
83
84 size = bd->hw_ring_stats_size;
85
86 for (i = 0; i < bd->nq_nr_rings; i++) {
87 struct bnge_napi *bnapi = bn->bnapi[i];
88 struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
89
90 nqr->stats.len = size;
91 rc = bnge_alloc_stats_mem(bn, &nqr->stats);
92 if (rc)
93 goto err_free_ring_stats;
94
95 nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
96 }
97 return 0;
98
99 err_free_ring_stats:
100 bnge_free_ring_stats(bn);
101 return rc;
102 }
103
bnge_free_nq_desc_arr(struct bnge_nq_ring_info * nqr)104 static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
105 {
106 struct bnge_ring_struct *ring = &nqr->ring_struct;
107
108 kfree(nqr->desc_ring);
109 nqr->desc_ring = NULL;
110 ring->ring_mem.pg_arr = NULL;
111 kfree(nqr->desc_mapping);
112 nqr->desc_mapping = NULL;
113 ring->ring_mem.dma_arr = NULL;
114 }
115
bnge_free_cp_desc_arr(struct bnge_cp_ring_info * cpr)116 static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
117 {
118 struct bnge_ring_struct *ring = &cpr->ring_struct;
119
120 kfree(cpr->desc_ring);
121 cpr->desc_ring = NULL;
122 ring->ring_mem.pg_arr = NULL;
123 kfree(cpr->desc_mapping);
124 cpr->desc_mapping = NULL;
125 ring->ring_mem.dma_arr = NULL;
126 }
127
bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info * nqr,int n)128 static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
129 {
130 nqr->desc_ring = kzalloc_objs(*nqr->desc_ring, n);
131 if (!nqr->desc_ring)
132 return -ENOMEM;
133
134 nqr->desc_mapping = kzalloc_objs(*nqr->desc_mapping, n);
135 if (!nqr->desc_mapping)
136 goto err_free_desc_ring;
137 return 0;
138
139 err_free_desc_ring:
140 kfree(nqr->desc_ring);
141 nqr->desc_ring = NULL;
142 return -ENOMEM;
143 }
144
bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info * cpr,int n)145 static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
146 {
147 cpr->desc_ring = kzalloc_objs(*cpr->desc_ring, n);
148 if (!cpr->desc_ring)
149 return -ENOMEM;
150
151 cpr->desc_mapping = kzalloc_objs(*cpr->desc_mapping, n);
152 if (!cpr->desc_mapping)
153 goto err_free_desc_ring;
154 return 0;
155
156 err_free_desc_ring:
157 kfree(cpr->desc_ring);
158 cpr->desc_ring = NULL;
159 return -ENOMEM;
160 }
161
bnge_free_nq_arrays(struct bnge_net * bn)162 static void bnge_free_nq_arrays(struct bnge_net *bn)
163 {
164 struct bnge_dev *bd = bn->bd;
165 int i;
166
167 for (i = 0; i < bd->nq_nr_rings; i++) {
168 struct bnge_napi *bnapi = bn->bnapi[i];
169
170 bnge_free_nq_desc_arr(&bnapi->nq_ring);
171 }
172 }
173
bnge_alloc_nq_arrays(struct bnge_net * bn)174 static int bnge_alloc_nq_arrays(struct bnge_net *bn)
175 {
176 struct bnge_dev *bd = bn->bd;
177 int i, rc;
178
179 for (i = 0; i < bd->nq_nr_rings; i++) {
180 struct bnge_napi *bnapi = bn->bnapi[i];
181
182 rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
183 if (rc)
184 goto err_free_nq_arrays;
185 }
186 return 0;
187
188 err_free_nq_arrays:
189 bnge_free_nq_arrays(bn);
190 return rc;
191 }
192
/* Tear down the whole NQ tree: for each notification queue, free its HW
 * ring memory, then every child completion ring hanging off it (HW ring
 * memory plus host descriptor arrays), and finally the cp_ring_arr
 * container itself.
 */
static void bnge_free_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		nqr = &bnapi->nq_ring;
		ring = &nqr->ring_struct;

		bnge_free_ring(bd, &ring->ring_mem);

		/* The NQ may not have had child CQs allocated yet. */
		if (!nqr->cp_ring_arr)
			continue;

		for (j = 0; j < nqr->cp_ring_count; j++) {
			struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];

			ring = &cpr->ring_struct;
			bnge_free_ring(bd, &ring->ring_mem);
			bnge_free_cp_desc_arr(cpr);
		}
		kfree(nqr->cp_ring_arr);
		nqr->cp_ring_arr = NULL;
		nqr->cp_ring_count = 0;
	}
}
224
alloc_one_cp_ring(struct bnge_net * bn,struct bnge_cp_ring_info * cpr)225 static int alloc_one_cp_ring(struct bnge_net *bn,
226 struct bnge_cp_ring_info *cpr)
227 {
228 struct bnge_ring_mem_info *rmem;
229 struct bnge_ring_struct *ring;
230 struct bnge_dev *bd = bn->bd;
231 int rc;
232
233 rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
234 if (rc)
235 return -ENOMEM;
236 ring = &cpr->ring_struct;
237 rmem = &ring->ring_mem;
238 rmem->nr_pages = bn->cp_nr_pages;
239 rmem->page_size = HW_CMPD_RING_SIZE;
240 rmem->pg_arr = (void **)cpr->desc_ring;
241 rmem->dma_arr = cpr->desc_mapping;
242 rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
243 rc = bnge_alloc_ring(bd, rmem);
244 if (rc)
245 goto err_free_cp_desc_arr;
246 return rc;
247
248 err_free_cp_desc_arr:
249 bnge_free_cp_desc_arr(cpr);
250 return rc;
251 }
252
/* Build the NQ tree: allocate every notification queue ring, decide how
 * many child completion rings each NQ needs (one for RX plus one per TC
 * for TX, depending on shared/non-shared channel layout), allocate them,
 * and wire the RX/TX rings to their completion rings.
 */
static int bnge_alloc_nq_tree(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, ulp_msix, rc;
	int tcs = 1;

	ulp_msix = bnge_aux_get_msix(bd);
	/* j counts NQs that host a TX completion ring; it indexes the
	 * TX ring base for per-TC mapping below.
	 */
	for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
		bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		int cp_count = 0, k;
		int rx = 0, tx = 0;

		nqr = &bnapi->nq_ring;
		nqr->bnapi = bnapi;
		ring = &nqr->ring_struct;

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_nq_tree;

		/* MSI-X vectors below ulp_msix belong to the aux device. */
		ring->map_idx = ulp_msix + i;

		/* First rx_nr_rings NQs each serve an RX ring. */
		if (i < bd->rx_nr_rings) {
			cp_count++;
			rx = 1;
		}

		/* Shared channels: RX and TX share the first NQs.
		 * Non-shared: TX NQs start after all the RX NQs.
		 */
		if ((sh && i < bd->tx_nr_rings) ||
		    (!sh && i >= bd->rx_nr_rings)) {
			cp_count += tcs;
			tx = 1;
		}

		nqr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
		if (!nqr->cp_ring_arr) {
			rc = -ENOMEM;
			goto err_free_nq_tree;
		}

		nqr->cp_ring_count = cp_count;

		for (k = 0; k < cp_count; k++) {
			cpr = &nqr->cp_ring_arr[k];
			rc = alloc_one_cp_ring(bn, cpr);
			if (rc)
				goto err_free_nq_tree;

			cpr->bnapi = bnapi;
			cpr->cp_idx = k;
			if (!k && rx) {
				/* Slot 0 is the RX completion ring. */
				bn->rx_ring[i].rx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
			} else {
				/* Remaining slots are TX, one per TC. */
				int n, tc = k - rx;

				n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
				bn->tx_ring[n].tx_cpr = cpr;
				cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
			}
		}
		if (tx)
			j++;
	}
	return 0;

err_free_nq_tree:
	bnge_free_nq_tree(bn);
	return rc;
}
326
bnge_separate_head_pool(struct bnge_rx_ring_info * rxr)327 static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
328 {
329 return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
330 }
331
bnge_free_one_rx_ring_bufs(struct bnge_net * bn,struct bnge_rx_ring_info * rxr)332 static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
333 struct bnge_rx_ring_info *rxr)
334 {
335 int i, max_idx;
336
337 if (!rxr->rx_buf_ring)
338 return;
339
340 max_idx = bn->rx_nr_pages * RX_DESC_CNT;
341
342 for (i = 0; i < max_idx; i++) {
343 struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
344 void *data = rx_buf->data;
345
346 if (!data)
347 continue;
348
349 rx_buf->data = NULL;
350 page_pool_free_va(rxr->head_pool, data, true);
351 }
352 }
353
bnge_free_one_agg_ring_bufs(struct bnge_net * bn,struct bnge_rx_ring_info * rxr)354 static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
355 struct bnge_rx_ring_info *rxr)
356 {
357 int i, max_idx;
358
359 if (!rxr->rx_agg_buf_ring)
360 return;
361
362 max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
363
364 for (i = 0; i < max_idx; i++) {
365 struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
366 netmem_ref netmem = rx_agg_buf->netmem;
367
368 if (!netmem)
369 continue;
370
371 rx_agg_buf->netmem = 0;
372 __clear_bit(i, rxr->rx_agg_bmap);
373
374 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
375 }
376 }
377
bnge_free_one_tpa_info_data(struct bnge_net * bn,struct bnge_rx_ring_info * rxr)378 static void bnge_free_one_tpa_info_data(struct bnge_net *bn,
379 struct bnge_rx_ring_info *rxr)
380 {
381 int i;
382
383 for (i = 0; i < bn->max_tpa; i++) {
384 struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i];
385 u8 *data = tpa_info->data;
386
387 if (!data)
388 continue;
389
390 tpa_info->data = NULL;
391 page_pool_free_va(rxr->head_pool, data, false);
392 }
393 }
394
/* Free all buffers of one RX/agg ring pair: TPA slot buffers first,
 * then the RX and aggregation buffers, and finally reset the TPA
 * aggregation-index bitmap so the ring pair can be refilled cleanly.
 */
static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
					    struct bnge_rx_ring_info *rxr)
{
	struct bnge_tpa_idx_map *map;

	if (rxr->rx_tpa)
		bnge_free_one_tpa_info_data(bn, rxr);

	bnge_free_one_rx_ring_bufs(bn, rxr);
	bnge_free_one_agg_ring_bufs(bn, rxr);

	map = rxr->rx_tpa_idx_map;
	if (map)
		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
410
bnge_free_rx_ring_pair_bufs(struct bnge_net * bn)411 static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
412 {
413 struct bnge_dev *bd = bn->bd;
414 int i;
415
416 if (!bn->rx_ring)
417 return;
418
419 for (i = 0; i < bd->rx_nr_rings; i++)
420 bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
421 }
422
/* Unmap and free every pending TX skb on all TX rings, then reset the
 * corresponding netdev BQL queues.  The walk mirrors how the ring was
 * filled: head BD, one skipped BD, then one BD per fragment.
 */
static void bnge_free_tx_skbs(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	u16 max_idx;
	int i;

	max_idx = bn->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		int j;

		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < max_idx;) {
			struct bnge_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb;
			int k, last;

			skb = tx_buf->skb;
			if (!skb) {
				/* Empty slot, advance one entry. */
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			/* Head BD maps the linear part of the skb. */
			dma_unmap_single(bd->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			last = tx_buf->nr_frags;
			/* Skip ahead past the head's extra BD before the
			 * fragment BDs.
			 */
			j += 2;
			for (k = 0; k < last; k++, j++) {
				/* j may wrap; mask back into the ring. */
				int ring_idx = j & bn->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(bd->dev,
					       dma_unmap_addr(tx_buf, mapping),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bd->netdev, i));
	}
}
472
/* Drop every buffer posted on RX/agg rings and every pending TX skb. */
static void bnge_free_all_rings_bufs(struct bnge_net *bn)
{
	bnge_free_rx_ring_pair_bufs(bn);
	bnge_free_tx_skbs(bn);
}
478
bnge_free_tpa_info(struct bnge_net * bn)479 static void bnge_free_tpa_info(struct bnge_net *bn)
480 {
481 struct bnge_dev *bd = bn->bd;
482 int i, j;
483
484 for (i = 0; i < bd->rx_nr_rings; i++) {
485 struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
486
487 kfree(rxr->rx_tpa_idx_map);
488 rxr->rx_tpa_idx_map = NULL;
489 if (rxr->rx_tpa) {
490 for (j = 0; j < bn->max_tpa; j++) {
491 kfree(rxr->rx_tpa[j].agg_arr);
492 rxr->rx_tpa[j].agg_arr = NULL;
493 }
494 }
495 kfree(rxr->rx_tpa);
496 rxr->rx_tpa = NULL;
497 }
498 }
499
/* Allocate the TPA (aggregation offload) bookkeeping for every RX ring:
 * bn->max_tpa slots per ring, an aggregation completion array per slot,
 * and one agg-index map per ring.  No-op when the device reports no
 * max_tpa_v2 capability.  Returns 0 or -ENOMEM (fully cleaned up).
 */
static int bnge_alloc_tpa_info(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j;

	if (!bd->max_tpa_v2)
		return 0;

	bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA);
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_tpa = kzalloc_objs(struct bnge_tpa_info, bn->max_tpa);
		if (!rxr->rx_tpa)
			goto err_free_tpa_info;

		for (j = 0; j < bn->max_tpa; j++) {
			struct rx_agg_cmp *agg;

			/* Room for up to MAX_SKB_FRAGS agg completions. */
			agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
			if (!agg)
				goto err_free_tpa_info;
			rxr->rx_tpa[j].agg_arr = agg;
		}
		rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
		if (!rxr->rx_tpa_idx_map)
			goto err_free_tpa_info;
	}
	return 0;

err_free_tpa_info:
	bnge_free_tpa_info(bn);
	return -ENOMEM;
}
534
/* Free everything owned by the RX rings: TPA state, the page pools
 * (payload and head — head may be a second reference to the same pool),
 * the agg bitmap, and the HW ring memory of both RX and agg rings.
 */
static void bnge_free_rx_rings(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	bnge_free_tpa_info(bn);
	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_ring_struct *ring;

		page_pool_destroy(rxr->page_pool);
		page_pool_destroy(rxr->head_pool);
		rxr->page_pool = rxr->head_pool = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnge_free_ring(bd, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnge_free_ring(bd, &ring->ring_mem);
	}
}
559
/* Create the page pool(s) for one RX ring on the given NUMA node.  The
 * main pool backs payload/aggregation buffers and may hand out
 * unreadable (device) memory; when headers must be host-readable a
 * second, smaller head pool is created, otherwise the head pool is an
 * extra reference to the main pool.
 */
static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr,
				   int numa_node)
{
	const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
	struct page_pool_params pp = { 0 };
	struct bnge_dev *bd = bn->bd;
	struct page_pool *pool;

	pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
	pp.nid = numa_node;
	pp.netdev = bn->netdev;
	pp.dev = bd->dev;
	pp.dma_dir = bn->rx_dir;
	pp.max_len = PAGE_SIZE;
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
	pp.queue_idx = rxr->bnapi->index;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);
	rxr->page_pool = pool;

	/* Unreadable netmem forces a separate, readable head pool. */
	rxr->need_head_pool = page_pool_is_unreadable(pool);
	if (bnge_separate_head_pool(rxr)) {
		pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pool = page_pool_create(&pp);
		if (IS_ERR(pool))
			goto err_destroy_pp;
	} else {
		/* Share the main pool; take a reference for head_pool. */
		page_pool_get(pool);
	}
	rxr->head_pool = pool;
	return 0;

err_destroy_pp:
	page_pool_destroy(rxr->page_pool);
	rxr->page_pool = NULL;
	return PTR_ERR(pool);
}
603
bnge_enable_rx_page_pool(struct bnge_rx_ring_info * rxr)604 static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
605 {
606 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
607 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
608 }
609
bnge_alloc_rx_agg_bmap(struct bnge_net * bn,struct bnge_rx_ring_info * rxr)610 static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
611 struct bnge_rx_ring_info *rxr)
612 {
613 u16 mem_size;
614
615 rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
616 mem_size = rxr->rx_agg_bmap_size / 8;
617 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
618 if (!rxr->rx_agg_bmap)
619 return -ENOMEM;
620
621 return 0;
622 }
623
/* Allocate all RX rings: a NUMA-local page pool per ring, the HW ring
 * memory, and — when aggregation is required — the agg ring plus its
 * bitmap, finishing with TPA state if TPA is enabled.  On any failure
 * everything allocated so far is freed.
 */
static int bnge_alloc_rx_rings(struct bnge_net *bn)
{
	int i, rc = 0, agg_rings = 0, cpu;
	struct bnge_dev *bd = bn->bd;

	if (bnge_is_agg_reqd(bd))
		agg_rings = 1;

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_ring_struct *ring;
		int cpu_node;

		ring = &rxr->rx_ring_struct;

		/* Spread pools across the device's local CPUs/nodes. */
		cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
		cpu_node = cpu_to_node(cpu);
		netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
			   i, cpu_node);
		rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
		if (rc)
			goto err_free_rx_rings;
		bnge_enable_rx_page_pool(rxr);

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_rx_rings;

		ring->grp_idx = i;
		if (agg_rings) {
			ring = &rxr->rx_agg_ring_struct;
			rc = bnge_alloc_ring(bd, &ring->ring_mem);
			if (rc)
				goto err_free_rx_rings;

			ring->grp_idx = i;
			rc = bnge_alloc_rx_agg_bmap(bn, rxr);
			if (rc)
				goto err_free_rx_rings;
		}
	}

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_alloc_tpa_info(bn);
		if (rc)
			goto err_free_rx_rings;
	}
	return rc;

err_free_rx_rings:
	bnge_free_rx_rings(bn);
	return rc;
}
677
bnge_free_tx_rings(struct bnge_net * bn)678 static void bnge_free_tx_rings(struct bnge_net *bn)
679 {
680 struct bnge_dev *bd = bn->bd;
681 int i;
682
683 for (i = 0; i < bd->tx_nr_rings; i++) {
684 struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
685 struct bnge_ring_struct *ring;
686
687 ring = &txr->tx_ring_struct;
688
689 bnge_free_ring(bd, &ring->ring_mem);
690 }
691 }
692
/* Allocate the HW ring memory for every TX ring and assign each ring
 * its HW queue id based on the traffic class it belongs to.  On any
 * failure all TX rings allocated so far are freed.
 */
static int bnge_alloc_tx_rings(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, rc;

	/* j tracks the current traffic class as i walks the rings. */
	for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnge_alloc_ring(bd, &ring->ring_mem);
		if (rc)
			goto err_free_tx_rings;

		ring->grp_idx = txr->bnapi->index;
		qidx = bd->tc_to_qidx[j];
		ring->queue_id = bd->q_info[qidx].queue_id;
		/* Last ring of this TC: advance to the next TC. */
		if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;

err_free_tx_rings:
	bnge_free_tx_rings(bn);
	return rc;
}
721
/* Free the per-VNIC attribute memory: the unicast list, the
 * DMA-coherent multicast list, and the DMA-coherent RSS table (which
 * also carries the hash key, so rss_hash_key is just cleared).
 */
static void bnge_free_vnic_attributes(struct bnge_net *bn)
{
	struct pci_dev *pdev = bn->bd->pdev;
	struct bnge_vnic_info *vnic;
	int i;

	if (!bn->vnic_info)
		return;

	for (i = 0; i < bn->nr_vnics; i++) {
		vnic = &bn->vnic_info[i];

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		/* Hash key lives inside rss_table; nothing to free. */
		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}
754
/* Allocate per-VNIC attribute memory according to each VNIC's flags:
 * an optional unicast list, an optional DMA-coherent multicast list,
 * and always a DMA-coherent RSS table with the hash key appended after
 * the (cache-aligned) table.  Returns 0 or -ENOMEM, fully cleaned up.
 */
static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_vnic_info *vnic;
	int i, size;

	for (i = 0; i < bn->nr_vnics; i++) {
		vnic = &bn->vnic_info[i];

		if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
			/* Slot 0 is the primary MAC; only extras stored. */
			int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;

			vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
			if (!vnic->uc_list)
				goto err_free_vnic_attributes;
		}

		if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(bd->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list)
				goto err_free_vnic_attributes;
		}

		/* Allocate rss table and hash key */
		size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);

		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
		vnic->rss_table = dma_alloc_coherent(bd->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table)
			goto err_free_vnic_attributes;

		/* Hash key sits right after the aligned RSS table. */
		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

err_free_vnic_attributes:
	bnge_free_vnic_attributes(bn);
	return -ENOMEM;
}
803
bnge_alloc_vnics(struct bnge_net * bn)804 static int bnge_alloc_vnics(struct bnge_net *bn)
805 {
806 int num_vnics;
807
808 /* Allocate only 1 VNIC for now
809 * Additional VNICs will be added based on RFS/NTUPLE in future patches
810 */
811 num_vnics = 1;
812
813 bn->vnic_info = kzalloc_objs(struct bnge_vnic_info, num_vnics);
814 if (!bn->vnic_info)
815 return -ENOMEM;
816
817 bn->nr_vnics = num_vnics;
818
819 return 0;
820 }
821
/* Free the VNIC info array and reset the VNIC count. */
static void bnge_free_vnics(struct bnge_net *bn)
{
	kfree(bn->vnic_info);
	bn->vnic_info = NULL;
	bn->nr_vnics = 0;
}
828
/* Free the ring-group info array. */
static void bnge_free_ring_grps(struct bnge_net *bn)
{
	kfree(bn->grp_info);
	bn->grp_info = NULL;
}
834
bnge_init_ring_grps(struct bnge_net * bn)835 static int bnge_init_ring_grps(struct bnge_net *bn)
836 {
837 struct bnge_dev *bd = bn->bd;
838 int i;
839
840 bn->grp_info = kzalloc_objs(struct bnge_ring_grp_info, bd->nq_nr_rings);
841 if (!bn->grp_info)
842 return -ENOMEM;
843 for (i = 0; i < bd->nq_nr_rings; i++) {
844 bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
845 bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
846 bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
847 bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
848 bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
849 }
850
851 return 0;
852 }
853
/* Tear down everything bnge_alloc_core() built, in reverse dependency
 * order, finishing with the top-level ring and bnapi arrays.
 */
static void bnge_free_core(struct bnge_net *bn)
{
	bnge_free_vnic_attributes(bn);
	bnge_free_tx_rings(bn);
	bnge_free_rx_rings(bn);
	bnge_free_nq_tree(bn);
	bnge_free_nq_arrays(bn);
	bnge_free_ring_stats(bn);
	bnge_free_ring_grps(bn);
	bnge_free_vnics(bn);
	kfree(bn->tx_ring_map);
	bn->tx_ring_map = NULL;
	kfree(bn->tx_ring);
	bn->tx_ring = NULL;
	kfree(bn->rx_ring);
	bn->rx_ring = NULL;
	kfree(bn->bnapi);
	bn->bnapi = NULL;
}
873
/* Allocate and wire up the driver's core data structures: the bnapi
 * array (pointer array plus the instances in one allocation), RX and TX
 * ring arrays, stats, VNICs, NQ arrays, HW rings and the NQ tree, and
 * finally the default VNIC's attributes.  Any failure unwinds through
 * bnge_free_core().
 */
static int bnge_alloc_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j, size, arr_size;
	int rc = -ENOMEM;
	void *bnapi;

	/* One allocation: pointer array followed by the cache-aligned
	 * bnge_napi instances it points at.
	 */
	arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
				  bd->nq_nr_rings);
	size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
	bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
	if (!bnapi)
		return rc;

	bn->bnapi = bnapi;
	bnapi += arr_size;
	for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
		struct bnge_nq_ring_info *nqr;

		bn->bnapi[i] = bnapi;
		bn->bnapi[i]->index = i;
		bn->bnapi[i]->bn = bn;
		nqr = &bn->bnapi[i]->nq_ring;
		nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
	}

	bn->rx_ring = kzalloc_objs(struct bnge_rx_ring_info, bd->rx_nr_rings);
	if (!bn->rx_ring)
		goto err_free_core;

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];

		rxr->rx_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		rxr->rx_agg_ring_struct.ring_mem.flags =
			BNGE_RMEM_RING_PTE_FLAG;
		/* RX ring i is always served by bnapi i. */
		rxr->bnapi = bn->bnapi[i];
		bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
	}

	bn->tx_ring = kzalloc_objs(struct bnge_tx_ring_info, bd->tx_nr_rings);
	if (!bn->tx_ring)
		goto err_free_core;

	bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
				  GFP_KERNEL);
	if (!bn->tx_ring_map)
		goto err_free_core;

	/* Shared channels: TX shares the RX bnapis starting at 0;
	 * otherwise TX bnapis start after all the RX ones.
	 */
	if (bd->flags & BNGE_EN_SHARED_CHNL)
		j = 0;
	else
		j = bd->rx_nr_rings;

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
		struct bnge_napi *bnapi2;
		int k;

		txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
		bn->tx_ring_map[i] = i;
		/* Rings of different TCs that share an NQ map to the
		 * same bnapi at different tx_ring slots.
		 */
		k = j + BNGE_RING_TO_TC_OFF(bd, i);

		bnapi2 = bn->bnapi[k];
		txr->txq_index = i;
		txr->tx_napi_idx =
			BNGE_RING_TO_TC(bd, txr->txq_index);
		bnapi2->tx_ring[txr->tx_napi_idx] = txr;
		txr->bnapi = bnapi2;
	}

	rc = bnge_alloc_ring_stats(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_vnics(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_arrays(bn);
	if (rc)
		goto err_free_core;

	bnge_init_ring_struct(bn);

	rc = bnge_alloc_rx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_tx_rings(bn);
	if (rc)
		goto err_free_core;

	rc = bnge_alloc_nq_tree(bn);
	if (rc)
		goto err_free_core;

	bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
						  BNGE_VNIC_MCAST_FLAG |
						  BNGE_VNIC_UCAST_FLAG;
	rc = bnge_alloc_vnic_attributes(bn);
	if (rc)
		goto err_free_core;
	return 0;

err_free_core:
	bnge_free_core(bn);
	return rc;
}
984
bnge_cp_ring_for_rx(struct bnge_rx_ring_info * rxr)985 u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
986 {
987 return rxr->rx_cpr->ring_struct.fw_ring_id;
988 }
989
bnge_cp_ring_for_tx(struct bnge_tx_ring_info * txr)990 u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
991 {
992 return txr->tx_cpr->ring_struct.fw_ring_id;
993 }
994
/* Ring the NQ doorbell with the ARM bit set: acknowledge up to @idx and
 * re-enable notifications.
 */
static void bnge_db_nq_arm(struct bnge_net *bn,
			   struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_ARM |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1001
/* Ring the NQ doorbell with the MASK type: acknowledge up to @idx while
 * leaving notifications disabled.
 */
static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1007
/* Ring the completion-queue doorbell with ARMALL: acknowledge up to
 * @idx and re-arm the CQ.
 */
static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
	bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
		    DB_RING_IDX(db, idx), db->doorbell);
}
1013
bnge_cp_num_to_irq_num(struct bnge_net * bn,int n)1014 static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
1015 {
1016 struct bnge_napi *bnapi = bn->bnapi[n];
1017 struct bnge_nq_ring_info *nqr;
1018
1019 nqr = &bnapi->nq_ring;
1020
1021 return nqr->ring_struct.map_idx;
1022 }
1023
bnge_init_nq_tree(struct bnge_net * bn)1024 static void bnge_init_nq_tree(struct bnge_net *bn)
1025 {
1026 struct bnge_dev *bd = bn->bd;
1027 int i, j;
1028
1029 for (i = 0; i < bd->nq_nr_rings; i++) {
1030 struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
1031 struct bnge_ring_struct *ring = &nqr->ring_struct;
1032
1033 ring->fw_ring_id = INVALID_HW_RING_ID;
1034 for (j = 0; j < nqr->cp_ring_count; j++) {
1035 struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
1036
1037 ring = &cpr->ring_struct;
1038 ring->fw_ring_id = INVALID_HW_RING_ID;
1039 }
1040 }
1041 }
1042
/* Allocate one RX buffer from the ring's page pool as netmem.  When the
 * system page is larger than BNGE_RX_PAGE_SIZE, a fragment of a page is
 * used and *offset reports its position; otherwise a whole page is
 * taken and *offset is 0.  On success *mapping holds the DMA address of
 * the buffer; returns 0 (a null netmem_ref) on failure.
 */
static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
					 dma_addr_t *mapping,
					 struct bnge_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	netmem_ref netmem;

	if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
						     BNGE_RX_PAGE_SIZE, gfp);
	} else {
		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
		*offset = 0;
	}
	if (!netmem)
		return 0;

	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
}
1064
/* Allocate one rx_buf_size fragment from the head pool.  On success
 * *mapping is set to the fragment's DMA address plus bn->rx_dma_offset
 * and the kernel virtual address is returned; NULL on failure.
 */
u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
			 struct bnge_rx_ring_info *rxr,
			 gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(rxr->head_pool, &offset,
				    bn->rx_buf_size, gfp);
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
	return page_address(page) + offset;
}
1080
/* Post one RX buffer at producer index @prod: allocate a fragment,
 * record it in the software ring, and write its DMA address into the
 * matching hardware descriptor.  Returns 0 or -ENOMEM.
 */
int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
	struct rx_bd *rxbd;
	dma_addr_t mapping;
	u8 *data;

	rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
	data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	/* data_ptr skips the headroom where packet data will start. */
	rx_buf->data_ptr = data + bn->rx_offset;
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}
1102
/* Fill one RX ring with buffers.  A partially filled ring is tolerated
 * (warn and continue); only a completely empty ring is a hard failure.
 */
static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr,
				       int ring_nr)
{
	u32 prod = rxr->rx_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_ring_size; i++) {
		rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX(prod);
	}

	/* Abort if not a single buffer can be allocated */
	if (rc && !i) {
		netdev_err(bn->netdev,
			   "RX ring %d: allocated %d/%d buffers, abort\n",
			   ring_nr, i, bn->rx_ring_size);
		return rc;
	}

	rxr->rx_prod = prod;

	if (i < bn->rx_ring_size)
		netdev_warn(bn->netdev,
			    "RX ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_ring_size);
	return 0;
}
1133
/* Find the first free aggregation slot at or after @idx, wrapping back
 * to the start of the bitmap if none is found above @idx.
 */
u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
{
	u16 limit = rxr->rx_agg_bmap_size;
	u16 slot = find_next_zero_bit(rxr->rx_agg_bmap, limit, idx);

	if (slot >= limit)
		slot = find_first_zero_bit(rxr->rx_agg_bmap, limit);

	return slot;
}
1143
/* Post one aggregation buffer at producer index @prod: allocate netmem,
 * claim a free software slot in the agg bitmap (the HW producer and SW
 * slot can diverge, hence the bitmap search), record the buffer, and
 * fill the hardware descriptor including the opaque SW slot id.
 */
int bnge_alloc_rx_netmem(struct bnge_net *bn,
			 struct bnge_rx_ring_info *rxr,
			 u16 prod, gfp_t gfp)
{
	struct bnge_sw_rx_agg_bd *rx_agg_buf;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;
	struct rx_bd *rxbd;
	dma_addr_t mapping;
	netmem_ref netmem;

	rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
	netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
	if (!netmem)
		return -ENOMEM;

	/* Current slot already taken: search for a free one. */
	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->netmem = netmem;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	/* opaque echoes the SW slot back in completions. */
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
1174
/* Populate aggregation ring @ring_nr with netmem buffers.
 *
 * Unlike the main RX ring, at least MAX_SKB_FRAGS buffers must be posted
 * (per the error message below); if allocation fails before reaching that
 * minimum, everything posted so far is freed and -ENOMEM is returned.
 * A partially filled ring above the minimum is accepted with a warning.
 */
static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr,
					int ring_nr)
{
	u32 prod = rxr->rx_agg_prod;
	int i, rc = 0;

	for (i = 0; i < bn->rx_agg_ring_size; i++) {
		rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
		if (rc)
			break;
		prod = NEXT_RX_AGG(prod);
	}

	if (rc && i < MAX_SKB_FRAGS) {
		netdev_err(bn->netdev,
			   "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
			   ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
		goto err_free_one_agg_ring_bufs;
	}

	/* Commit the producer index only for the buffers actually posted. */
	rxr->rx_agg_prod = prod;

	if (i < bn->rx_agg_ring_size)
		netdev_warn(bn->netdev,
			    "Agg ring %d: allocated %d/%d buffers, continuing\n",
			    ring_nr, i, bn->rx_agg_ring_size);
	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
	return -ENOMEM;
}
1208
/* Allocate one data buffer for each of the ring's max_tpa TPA slots.
 *
 * data_ptr is offset by bn->rx_offset, matching the layout used for
 * regular RX buffers.  All-or-nothing: on any allocation failure every
 * buffer allocated so far is released and -ENOMEM is returned.
 */
static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr)
{
	dma_addr_t mapping;
	u8 *data;
	int i;

	for (i = 0; i < bn->max_tpa; i++) {
		data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
					    GFP_KERNEL);
		if (!data)
			goto err_free_tpa_info_data;

		rxr->rx_tpa[i].data = data;
		rxr->rx_tpa[i].data_ptr = data + bn->rx_offset;
		rxr->rx_tpa[i].mapping = mapping;
	}
	return 0;

err_free_tpa_info_data:
	bnge_free_one_tpa_info_data(bn, rxr);
	return -ENOMEM;
}
1232
/* Allocate all buffers for one RX/agg ring pair: RX buffers first, then
 * (if the device needs aggregation rings) agg buffers, then TPA buffers
 * when TPA is enabled for this ring.  On failure the goto chain unwinds
 * in reverse allocation order.
 */
static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
{
	struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
	int rc;

	rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
	if (rc)
		return rc;

	if (bnge_is_agg_reqd(bn->bd)) {
		rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
		if (rc)
			goto err_free_one_rx_ring_bufs;
	}

	if (rxr->rx_tpa) {
		rc = bnge_alloc_one_tpa_info_data(bn, rxr);
		if (rc)
			goto err_free_one_agg_ring_bufs;
	}

	return 0;

err_free_one_agg_ring_bufs:
	bnge_free_one_agg_ring_bufs(bn, rxr);
err_free_one_rx_ring_bufs:
	bnge_free_one_rx_ring_bufs(bn, rxr);
	return rc;
}
1262
/* Stamp every descriptor of an RX-style ring with @type and a running
 * opaque index (the descriptor's absolute position across all pages).
 */
static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
{
	struct rx_bd **pages = (struct rx_bd **)ring->ring_mem.pg_arr;
	u32 opaque = 0;
	int pg;

	for (pg = 0; pg < ring->ring_mem.nr_pages; pg++) {
		struct rx_bd *bd = pages[pg];
		int k;

		for (k = 0; k < RX_DESC_CNT; k++) {
			bd->rx_bd_len_flags_type = cpu_to_le32(type);
			bd->rx_bd_opaque = opaque;
			bd++;
			opaque++;
		}
	}
}
1280
/* Initialize the hardware descriptors of one RX ring: encode the buffer
 * size and packet-BD type/flags into each descriptor and invalidate the
 * firmware ring id until the ring is allocated with the firmware.
 */
static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring;
	u32 type;

	type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
	       RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	/* NOTE(review): SOP is only set when NET_IP_ALIGN == 2; presumably
	 * tied to the 2-byte IP-alignment headroom — confirm against HW docs.
	 */
	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	ring = &rxr->rx_ring_struct;
	bnge_init_rxbd_pages(ring, type);
	ring->fw_ring_id = INVALID_HW_RING_ID;
}
1297
/* Initialize the hardware descriptors of one aggregation ring.  The
 * descriptors are only stamped when the device actually uses agg rings;
 * the firmware ring id is invalidated unconditionally.
 */
static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
					struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring;
	u32 type;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;
	if (bnge_is_agg_reqd(bn->bd)) {
		type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		       RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

		bnge_init_rxbd_pages(ring, type);
	}
}
1313
/* Initialize the descriptor rings of one RX/agg pair and associate the
 * RX queue with its NAPI instance for queue<->NAPI reporting.
 */
static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
{
	struct bnge_rx_ring_info *rxr;

	rxr = &bn->rx_ring[ring_nr];
	bnge_init_one_rx_ring_rxbd(bn, rxr);

	netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
			     &rxr->bnapi->napi);

	bnge_init_one_agg_ring_rxbd(bn, rxr);
}
1326
bnge_alloc_rx_ring_pair_bufs(struct bnge_net * bn)1327 static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
1328 {
1329 int i, rc;
1330
1331 for (i = 0; i < bn->bd->rx_nr_rings; i++) {
1332 rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
1333 if (rc)
1334 goto err_free_rx_ring_pair_bufs;
1335 }
1336 return 0;
1337
1338 err_free_rx_ring_pair_bufs:
1339 bnge_free_rx_ring_pair_bufs(bn);
1340 return rc;
1341 }
1342
/* Initialize all RX ring pairs and set the global RX buffer offsets:
 * packets are DMAed at NET_SKB_PAD and parsed at NET_SKB_PAD +
 * NET_IP_ALIGN so the IP header ends up aligned.
 */
static void bnge_init_rx_rings(struct bnge_net *bn)
{
	int i;

#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
	bn->rx_offset = BNGE_RX_OFFSET;
	bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;

	for (i = 0; i < bn->bd->rx_nr_rings; i++)
		bnge_init_one_rx_ring_pair(bn, i);
}
1355
bnge_init_tx_rings(struct bnge_net * bn)1356 static void bnge_init_tx_rings(struct bnge_net *bn)
1357 {
1358 int i;
1359
1360 bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
1361
1362 for (i = 0; i < bn->bd->tx_nr_rings; i++) {
1363 struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
1364 struct bnge_ring_struct *ring = &txr->tx_ring_struct;
1365
1366 ring->fw_ring_id = INVALID_HW_RING_ID;
1367
1368 netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
1369 &txr->bnapi->napi);
1370 }
1371 }
1372
/* Initialize software state of all VNICs.
 *
 * Every VNIC gets invalidated firmware ids.  The default VNIC owns the
 * device RSS hash key: a random key is generated once (unless a key was
 * already provided/validated), and the first 8 bytes are folded into
 * bn->toeplitz_prefix.  Non-default VNICs copy the default VNIC's key.
 */
static void bnge_init_vnics(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	int i;

	for (i = 0; i < bn->nr_vnics; i++) {
		struct bnge_vnic_info *vnic = &bn->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->vnic_id = i;
		for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		if (bn->vnic_info[i].rss_hash_key) {
			if (i == BNGE_VNIC_DEFAULT) {
				u8 *key = (void *)vnic->rss_hash_key;
				int k;

				if (!bn->rss_hash_key_valid &&
				    !bn->rss_hash_key_updated) {
					get_random_bytes(bn->rss_hash_key,
							 HW_HASH_KEY_SIZE);
					bn->rss_hash_key_updated = true;
				}

				memcpy(vnic->rss_hash_key, bn->rss_hash_key,
				       HW_HASH_KEY_SIZE);

				/* Key unchanged: skip recomputing the
				 * toeplitz prefix (continues the i loop).
				 */
				if (!bn->rss_hash_key_updated)
					continue;

				bn->rss_hash_key_updated = false;
				bn->rss_hash_key_valid = true;

				/* Fold the first 8 key bytes, big-endian
				 * style, into the 64-bit toeplitz prefix.
				 */
				bn->toeplitz_prefix = 0;
				for (k = 0; k < 8; k++) {
					bn->toeplitz_prefix <<= 8;
					bn->toeplitz_prefix |= key[k];
				}
			} else {
				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
				       HW_HASH_KEY_SIZE);
			}
		}
	}
}
1420
/* Derive the doorbell index/epoch masks for @ring_type from the matching
 * ring-size mask.  The epoch bit sits one above the index width.
 *
 * NOTE(review): no default case — an unknown ring_type would leave
 * db_ring_mask stale.  All visible callers pass one of the four handled
 * types, but a default might be worth adding defensively.
 */
static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
			     u32 ring_type)
{
	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		db->db_ring_mask = bn->tx_ring_mask;
		break;
	case HWRM_RING_ALLOC_RX:
		db->db_ring_mask = bn->rx_ring_mask;
		break;
	case HWRM_RING_ALLOC_AGG:
		db->db_ring_mask = bn->rx_agg_ring_mask;
		break;
	case HWRM_RING_ALLOC_CMPL:
	case HWRM_RING_ALLOC_NQ:
		db->db_ring_mask = bn->cp_ring_mask;
		break;
	}
	db->db_epoch_mask = db->db_ring_mask + 1;
	db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
}
1442
/* Initialize a doorbell descriptor: select the 64-bit doorbell key for the
 * ring type, embed the firmware-assigned ring id (@xid), point at the BAR1
 * doorbell region, and compute the ring masks.
 */
static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
			u32 ring_type, u32 map_idx, u32 xid)
{
	struct bnge_dev *bd = bn->bd;

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
		break;
	case HWRM_RING_ALLOC_RX:
	case HWRM_RING_ALLOC_AGG:
		db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
		break;
	case HWRM_RING_ALLOC_CMPL:
		db->db_key64 = DBR_PATH_L2;
		break;
	case HWRM_RING_ALLOC_NQ:
		db->db_key64 = DBR_PATH_L2;
		break;
	}
	db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;

	db->doorbell = bd->bar1 + bd->db_offset;
	bnge_set_db_mask(bn, db, ring_type);
}
1468
/* Allocate one completion ring with the firmware, set up its doorbell,
 * and ring it once with the current consumer index.  The ring handle is
 * set before the firmware call so completions can be demultiplexed back
 * to this cpr.
 */
static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
				   struct bnge_cp_ring_info *cpr)
{
	const u32 type = HWRM_RING_ALLOC_CMPL;
	struct bnge_napi *bnapi = cpr->bnapi;
	struct bnge_ring_struct *ring;
	u32 map_idx = bnapi->index;
	int rc;

	ring = &cpr->ring_struct;
	ring->handle = BNGE_SET_NQ_HDL(cpr);
	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
	bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);

	return 0;
}
1489
/* Allocate one TX ring with the firmware and set up its doorbell.
 * Returns 0 on success or the firmware error code.
 */
static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
				   struct bnge_tx_ring_info *txr, u32 tx_idx)
{
	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
	const u32 type = HWRM_RING_ALLOC_TX;
	int rc;

	rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
	if (!rc)
		bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);

	return rc;
}
1505
/* Allocate one aggregation ring with the firmware.  Agg rings share the
 * doorbell map space with RX rings, hence the rx_nr_rings offset in
 * map_idx.  Both the agg and RX producer doorbells are written here: when
 * agg rings are in use, agg buffers are posted before the RX producer is
 * made visible (see bnge_hwrm_ring_alloc, which skips the rx_db write in
 * that case).
 */
static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
	u32 type = HWRM_RING_ALLOC_AGG;
	struct bnge_dev *bd = bn->bd;
	u32 grp_idx = ring->grp_idx;
	u32 map_idx;
	int rc;

	map_idx = grp_idx + bd->rx_nr_rings;
	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
		    ring->fw_ring_id);
	bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
	bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
	bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;

	return 0;
}
1529
/* Allocate one RX ring with the firmware, set up its doorbell, and record
 * the firmware ring id in the ring group.  The RX producer doorbell is
 * deliberately NOT written here — the caller decides when (after agg
 * buffers are posted, if applicable).
 */
static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr)
{
	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
	struct bnge_napi *bnapi = rxr->bnapi;
	u32 type = HWRM_RING_ALLOC_RX;
	u32 map_idx = bnapi->index;
	int rc;

	rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
	if (rc)
		return rc;

	bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
	bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;

	return 0;
}
1548
/* Allocate all hardware rings with the firmware, in dependency order:
 * NQs first, then per-TX completion + TX rings, then RX rings with their
 * completion rings, and finally aggregation rings.  On any failure the
 * partially allocated state is returned as-is; the caller is expected to
 * free rings via the teardown path.
 */
static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	bool agg_rings;
	int i, rc = 0;

	agg_rings = !!(bnge_is_agg_reqd(bd));
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
		struct bnge_ring_struct *ring = &nqr->ring_struct;
		u32 type = HWRM_RING_ALLOC_NQ;
		u32 map_idx = ring->map_idx;
		unsigned int vector;

		/* Keep the vector quiet until the NQ doorbell is armed. */
		vector = bd->irq_tbl[map_idx].vector;
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc) {
			enable_irq(vector);
			goto err_out;
		}
		bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
		bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
		enable_irq(vector);
		bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;

		/* The first NQ also receives firmware async events. */
		if (!i) {
			rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
			if (rc)
				netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
		}
	}

	for (i = 0; i < bd->tx_nr_rings; i++) {
		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];

		rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
		if (rc)
			goto err_out;
		rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
		if (rc)
			goto err_out;
	}

	for (i = 0; i < bd->rx_nr_rings; i++) {
		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
		struct bnge_cp_ring_info *cpr;
		struct bnge_ring_struct *ring;
		struct bnge_napi *bnapi;
		u32 map_idx, type;

		rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
		if (rc)
			goto err_out;
		/* If we have agg rings, post agg buffers first. */
		if (!agg_rings)
			bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);

		cpr = rxr->rx_cpr;
		bnapi = rxr->bnapi;
		type = HWRM_RING_ALLOC_CMPL;
		map_idx = bnapi->index;

		ring = &cpr->ring_struct;
		ring->handle = BNGE_SET_NQ_HDL(cpr);
		rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnge_set_db(bn, &cpr->cp_db, type, map_idx,
			    ring->fw_ring_id);
		bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
	}

	/* Agg rings last: bnge_hwrm_rx_agg_ring_alloc() writes both the agg
	 * and RX producer doorbells once agg buffers are in place.
	 */
	if (agg_rings) {
		for (i = 0; i < bd->rx_nr_rings; i++) {
			rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
			if (rc)
				goto err_out;
		}
	}
err_out:
	return rc;
}
1633
bnge_fill_hw_rss_tbl(struct bnge_net * bn,struct bnge_vnic_info * vnic)1634 void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
1635 {
1636 __le16 *ring_tbl = vnic->rss_table;
1637 struct bnge_rx_ring_info *rxr;
1638 struct bnge_dev *bd = bn->bd;
1639 u16 tbl_size, i;
1640
1641 tbl_size = bnge_get_rxfh_indir_size(bd);
1642
1643 for (i = 0; i < tbl_size; i++) {
1644 u16 ring_id, j;
1645
1646 j = bd->rss_indir_tbl[i];
1647 rxr = &bn->rx_ring[j];
1648
1649 ring_id = rxr->rx_ring_struct.fw_ring_id;
1650 *ring_tbl++ = cpu_to_le16(ring_id);
1651 ring_id = bnge_cp_ring_for_rx(rxr);
1652 *ring_tbl++ = cpu_to_le16(ring_id);
1653 }
1654 }
1655
bnge_hwrm_vnic_rss_cfg(struct bnge_net * bn,struct bnge_vnic_info * vnic)1656 static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
1657 struct bnge_vnic_info *vnic)
1658 {
1659 int rc;
1660
1661 rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
1662 if (rc) {
1663 netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
1664 vnic->vnic_id, rc);
1665 return rc;
1666 }
1667 rc = bnge_hwrm_vnic_cfg(bn, vnic);
1668 if (rc)
1669 netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
1670 vnic->vnic_id, rc);
1671 return rc;
1672 }
1673
/* Fully set up one VNIC: allocate its RSS contexts, program RSS and VNIC
 * config, and enable header-data-split when aggregation is in use.  An
 * HDS failure is logged but still returned to the caller.
 */
static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	struct bnge_dev *bd = bn->bd;
	int rc, i, nr_ctxs;

	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
				   vnic->vnic_id, i, rc);
			/* NOTE(review): the firmware rc is discarded and
			 * -ENOMEM returned instead — confirm this is
			 * intentional rather than `return rc`.
			 */
			return -ENOMEM;
		}
		bn->rsscos_nr_ctxs++;
	}

	rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
	if (rc)
		return rc;

	if (bnge_is_agg_reqd(bd)) {
		rc = bnge_hwrm_vnic_set_hds(bn, vnic);
		if (rc)
			netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
				   vnic->vnic_id, rc);
	}
	return rc;
}
1702
bnge_del_l2_filter(struct bnge_net * bn,struct bnge_l2_filter * fltr)1703 static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
1704 {
1705 if (!refcount_dec_and_test(&fltr->refcnt))
1706 return;
1707 hlist_del_rcu(&fltr->base.hash);
1708 kfree_rcu(fltr, base.rcu);
1709 }
1710
/* Initialize a freshly allocated L2 filter from @key and publish it into
 * hash bucket @idx with an initial reference count of 1.
 */
static void bnge_init_l2_filter(struct bnge_net *bn,
				struct bnge_l2_filter *fltr,
				struct bnge_l2_key *key, u32 idx)
{
	struct hlist_head *head;

	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
	fltr->l2_key.vlan = key->vlan;
	fltr->base.type = BNGE_FLTR_TYPE_L2;

	head = &bn->l2_fltr_hash_tbl[idx];
	hlist_add_head_rcu(&fltr->base.hash, head);
	refcount_set(&fltr->refcnt, 1);
}
1725
/* Scan hash bucket @idx for a filter whose MAC and VLAN match @key.
 * Caller must hold the RCU read lock; no reference is taken.
 */
static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
						      struct bnge_l2_key *key,
						      u32 idx)
{
	struct hlist_head *bucket = &bn->l2_fltr_hash_tbl[idx];
	struct bnge_l2_filter *fltr;

	hlist_for_each_entry_rcu(fltr, bucket, base.hash) {
		if (fltr->l2_key.vlan == key->vlan &&
		    ether_addr_equal(fltr->l2_key.dst_mac_addr,
				     key->dst_mac_addr))
			return fltr;
	}

	return NULL;
}
1743
/* RCU-protected filter lookup; on a hit a reference is taken before the
 * read section ends, so the returned filter stays valid for the caller.
 */
static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
						    struct bnge_l2_key *key,
						    u32 idx)
{
	struct bnge_l2_filter *found;

	rcu_read_lock();
	found = __bnge_lookup_l2_filter(bn, key, idx);
	if (found)
		refcount_inc(&found->refcnt);
	rcu_read_unlock();

	return found;
}
1757
/* Look up or create an L2 filter for @key.  An existing filter is returned
 * with an extra reference; otherwise a new one is allocated and published.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 *
 * NOTE(review): lookup and insert are not done under one lock here;
 * presumably callers serialize filter creation — confirm.
 */
static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
						   struct bnge_l2_key *key,
						   gfp_t gfp)
{
	struct bnge_l2_filter *fltr;
	u32 idx;

	idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
	      BNGE_L2_FLTR_HASH_MASK;
	fltr = bnge_lookup_l2_filter(bn, key, idx);
	if (fltr)
		return fltr;

	fltr = kzalloc_obj(*fltr, gfp);
	if (!fltr)
		return ERR_PTR(-ENOMEM);

	bnge_init_l2_filter(bn, fltr, key, idx);
	return fltr;
}
1778
/* Create (or reuse) an L2 filter for @mac_addr, program it into the
 * firmware for @vnic_id, and store it in the VNIC's filter table at @idx.
 * On firmware failure the filter reference is dropped again.
 */
static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
				     const u8 *mac_addr)
{
	struct bnge_l2_filter *fltr;
	struct bnge_l2_key key;
	int rc;

	ether_addr_copy(key.dst_mac_addr, mac_addr);
	key.vlan = 0;
	fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
	rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
	if (rc)
		goto err_del_l2_filter;
	bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
	return rc;

err_del_l2_filter:
	bnge_del_l2_filter(bn, fltr);
	return rc;
}
1803
/* Sync the netdev multicast list into the default VNIC's mc_list.
 *
 * Returns true when the cached list changed (address or count), i.e. the
 * RX mask must be reprogrammed.  If the list exceeds BNGE_MAX_MC_ADDRS,
 * ALL_MCAST is requested instead and false is returned (nothing cached).
 * Caller must hold the netdev addr lock.
 */
static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct netdev_hw_addr *ha;
	int mc_count = 0, off = 0;
	bool update = false;
	u8 *haddr;

	netdev_for_each_mc_addr(ha, dev) {
		if (mc_count >= BNGE_MAX_MC_ADDRS) {
			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			vnic->mc_list_count = 0;
			return false;
		}
		haddr = ha->addr;
		/* Copy only entries that differ and remember we changed. */
		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
			update = true;
		}
		off += ETH_ALEN;
		mc_count++;
	}
	if (mc_count)
		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

	if (mc_count != vnic->mc_list_count) {
		vnic->mc_list_count = mc_count;
		update = true;
	}
	return update;
}
1836
bnge_uc_list_updated(struct bnge_net * bn)1837 static bool bnge_uc_list_updated(struct bnge_net *bn)
1838 {
1839 struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
1840 struct net_device *dev = bn->netdev;
1841 struct netdev_hw_addr *ha;
1842 int off = 0;
1843
1844 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
1845 return true;
1846
1847 netdev_for_each_uc_addr(ha, dev) {
1848 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
1849 return true;
1850
1851 off += ETH_ALEN;
1852 }
1853 return false;
1854 }
1855
bnge_promisc_ok(struct bnge_net * bn)1856 static bool bnge_promisc_ok(struct bnge_net *bn)
1857 {
1858 return true;
1859 }
1860
/* Reconcile the default VNIC with the netdev address lists and RX mask.
 *
 * If the unicast list changed, all secondary L2 filters are freed and
 * re-created from the current list (falling back to promiscuous mode when
 * the list exceeds BNGE_MAX_UC_ADDRS - 1).  Then the RX mask is pushed to
 * firmware; if that fails because of the MC list, retry once in ALL_MCAST
 * mode.  The addr lock is held only while snapshotting the lists, not
 * across firmware calls.
 */
static int bnge_cfg_def_vnic(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct net_device *dev = bn->netdev;
	struct bnge_dev *bd = bn->bd;
	struct netdev_hw_addr *ha;
	int i, off = 0, rc;
	bool uc_update;

	netif_addr_lock_bh(dev);
	uc_update = bnge_uc_list_updated(bn);
	netif_addr_unlock_bh(dev);

	if (!uc_update)
		goto skip_uc;

	/* Entry 0 is the primary MAC and is kept; drop the rest. */
	for (i = 1; i < vnic->uc_filter_count; i++) {
		struct bnge_l2_filter *fltr = vnic->l2_filters[i];

		bnge_hwrm_l2_filter_free(bd, fltr);
		bnge_del_l2_filter(bn, fltr);
	}

	vnic->uc_filter_count = 1;

	netif_addr_lock_bh(dev);
	if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
			off += ETH_ALEN;
			vnic->uc_filter_count++;
		}
	}
	netif_addr_unlock_bh(dev);

	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
		rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
		if (rc) {
			netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
			/* Keep the count consistent with what was programmed. */
			vnic->uc_filter_count = i;
			return rc;
		}
	}

skip_uc:
	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
	    !bnge_promisc_ok(bn))
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
		netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
			    rc);
		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
		rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
	}
	if (rc)
		netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
			   rc);

	return rc;
}
1926
bnge_disable_int(struct bnge_net * bn)1927 static void bnge_disable_int(struct bnge_net *bn)
1928 {
1929 struct bnge_dev *bd = bn->bd;
1930 int i;
1931
1932 if (!bn->bnapi)
1933 return;
1934
1935 for (i = 0; i < bd->nq_nr_rings; i++) {
1936 struct bnge_napi *bnapi = bn->bnapi[i];
1937 struct bnge_nq_ring_info *nqr;
1938 struct bnge_ring_struct *ring;
1939
1940 nqr = &bnapi->nq_ring;
1941 ring = &nqr->ring_struct;
1942
1943 if (ring->fw_ring_id != INVALID_HW_RING_ID)
1944 bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
1945 }
1946 }
1947
bnge_disable_int_sync(struct bnge_net * bn)1948 static void bnge_disable_int_sync(struct bnge_net *bn)
1949 {
1950 struct bnge_dev *bd = bn->bd;
1951 int i;
1952
1953 bnge_disable_int(bn);
1954 for (i = 0; i < bd->nq_nr_rings; i++) {
1955 int map_idx = bnge_cp_num_to_irq_num(bn, i);
1956
1957 synchronize_irq(bd->irq_tbl[map_idx].vector);
1958 }
1959 }
1960
bnge_enable_int(struct bnge_net * bn)1961 static void bnge_enable_int(struct bnge_net *bn)
1962 {
1963 struct bnge_dev *bd = bn->bd;
1964 int i;
1965
1966 for (i = 0; i < bd->nq_nr_rings; i++) {
1967 struct bnge_napi *bnapi = bn->bnapi[i];
1968 struct bnge_nq_ring_info *nqr;
1969
1970 nqr = &bnapi->nq_ring;
1971 bnge_db_nq_arm(bn, &nqr->nq_db, nqr->nq_raw_cons);
1972 }
1973 }
1974
bnge_disable_napi(struct bnge_net * bn)1975 static void bnge_disable_napi(struct bnge_net *bn)
1976 {
1977 struct bnge_dev *bd = bn->bd;
1978 int i;
1979
1980 if (test_and_set_bit(BNGE_STATE_NAPI_DISABLED, &bn->state))
1981 return;
1982
1983 for (i = 0; i < bd->nq_nr_rings; i++) {
1984 struct bnge_napi *bnapi = bn->bnapi[i];
1985
1986 napi_disable_locked(&bnapi->napi);
1987 }
1988 }
1989
bnge_enable_napi(struct bnge_net * bn)1990 static void bnge_enable_napi(struct bnge_net *bn)
1991 {
1992 struct bnge_dev *bd = bn->bd;
1993 int i;
1994
1995 clear_bit(BNGE_STATE_NAPI_DISABLED, &bn->state);
1996 for (i = 0; i < bd->nq_nr_rings; i++) {
1997 struct bnge_napi *bnapi = bn->bnapi[i];
1998
1999 bnapi->in_reset = false;
2000 bnapi->tx_fault = 0;
2001
2002 napi_enable_locked(&bnapi->napi);
2003 }
2004 }
2005
bnge_hwrm_vnic_free(struct bnge_net * bn)2006 static void bnge_hwrm_vnic_free(struct bnge_net *bn)
2007 {
2008 int i;
2009
2010 for (i = 0; i < bn->nr_vnics; i++)
2011 bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
2012 }
2013
bnge_hwrm_vnic_ctx_free(struct bnge_net * bn)2014 static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
2015 {
2016 int i, j;
2017
2018 for (i = 0; i < bn->nr_vnics; i++) {
2019 struct bnge_vnic_info *vnic = &bn->vnic_info[i];
2020
2021 for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
2022 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
2023 bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
2024 }
2025 }
2026 bn->rsscos_nr_ctxs = 0;
2027 }
2028
bnge_hwrm_clear_vnic_filter(struct bnge_net * bn)2029 static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
2030 {
2031 struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
2032 int i;
2033
2034 for (i = 0; i < vnic->uc_filter_count; i++) {
2035 struct bnge_l2_filter *fltr = vnic->l2_filters[i];
2036
2037 bnge_hwrm_l2_filter_free(bn->bd, fltr);
2038 bnge_del_l2_filter(bn, fltr);
2039 }
2040
2041 vnic->uc_filter_count = 0;
2042 }
2043
/* Tear down VNIC state in order: L2 filters first, then the VNICs, then
 * their RSS contexts.
 */
static void bnge_clear_vnic(struct bnge_net *bn)
{
	bnge_hwrm_clear_vnic_filter(bn);
	bnge_hwrm_vnic_free(bn);
	bnge_hwrm_vnic_ctx_free(bn);
}
2050
/* Free one RX ring with the firmware.  When @close_path, the companion
 * completion ring id is supplied so firmware can flush pending
 * completions; otherwise INVALID_HW_RING_ID is passed.  No-op if the ring
 * was never allocated.
 */
static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr,
				   bool close_path)
{
	struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
	u32 grp_idx = rxr->bnapi->index;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
	hwrm_ring_free_send_msg(bn, ring,
				RING_FREE_REQ_RING_TYPE_RX,
				close_path ? cmpl_ring_id :
				INVALID_HW_RING_ID);
	ring->fw_ring_id = INVALID_HW_RING_ID;
	bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
}
2070
/* Free one aggregation ring with the firmware; mirrors
 * bnge_hwrm_rx_ring_free() but for the agg ring of the pair.  No-op if
 * the ring was never allocated.
 */
static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
				       struct bnge_rx_ring_info *rxr,
				       bool close_path)
{
	struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
	u32 grp_idx = rxr->bnapi->index;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
				close_path ? cmpl_ring_id :
				INVALID_HW_RING_ID);
	ring->fw_ring_id = INVALID_HW_RING_ID;
	bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
2089
/* Free one TX ring with the firmware.  When @close_path, pass the TX
 * completion ring id so firmware can flush it.  No-op if the ring was
 * never allocated.
 */
static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
				   struct bnge_tx_ring_info *txr,
				   bool close_path)
{
	struct bnge_ring_struct *ring = &txr->tx_ring_struct;
	u32 cmpl_ring_id;

	if (ring->fw_ring_id == INVALID_HW_RING_ID)
		return;

	cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
		       INVALID_HW_RING_ID;
	hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
				cmpl_ring_id);
	ring->fw_ring_id = INVALID_HW_RING_ID;
}
2106
bnge_hwrm_cp_ring_free(struct bnge_net * bn,struct bnge_cp_ring_info * cpr)2107 static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
2108 struct bnge_cp_ring_info *cpr)
2109 {
2110 struct bnge_ring_struct *ring;
2111
2112 ring = &cpr->ring_struct;
2113 if (ring->fw_ring_id == INVALID_HW_RING_ID)
2114 return;
2115
2116 hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
2117 INVALID_HW_RING_ID);
2118 ring->fw_ring_id = INVALID_HW_RING_ID;
2119 }
2120
/* Free all firmware rings in reverse dependency order: TX, then RX/agg
 * pairs, then — after quiescing interrupts — the completion rings and
 * NQs.  @close_path selects graceful teardown (firmware flushes via the
 * companion completion rings).
 */
static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
{
	struct bnge_dev *bd = bn->bd;
	int i;

	if (!bn->bnapi)
		return;

	for (i = 0; i < bd->tx_nr_rings; i++)
		bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);

	for (i = 0; i < bd->rx_nr_rings; i++) {
		bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
		bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
	}

	/* The completion rings are about to be freed. After that the
	 * IRQ doorbell will not work anymore. So we need to disable
	 * IRQ here.
	 */
	bnge_disable_int_sync(bn);

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr;
		struct bnge_ring_struct *ring;
		int j;

		nqr = &bnapi->nq_ring;
		/* Free the child completion rings before their parent NQ. */
		for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
			bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);

		ring = &nqr->ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bn, ring,
						RING_FREE_REQ_RING_TYPE_NQ,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}
2163
bnge_setup_msix(struct bnge_net * bn)2164 static void bnge_setup_msix(struct bnge_net *bn)
2165 {
2166 struct net_device *dev = bn->netdev;
2167 struct bnge_dev *bd = bn->bd;
2168 int len, i;
2169
2170 len = sizeof(bd->irq_tbl[0].name);
2171 for (i = 0; i < bd->nq_nr_rings; i++) {
2172 int map_idx = bnge_cp_num_to_irq_num(bn, i);
2173 char *attr;
2174
2175 if (bd->flags & BNGE_EN_SHARED_CHNL)
2176 attr = "TxRx";
2177 else if (i < bd->rx_nr_rings)
2178 attr = "rx";
2179 else
2180 attr = "tx";
2181
2182 snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
2183 attr, i);
2184 bd->irq_tbl[map_idx].handler = bnge_msix;
2185 }
2186 }
2187
bnge_setup_interrupts(struct bnge_net * bn)2188 static int bnge_setup_interrupts(struct bnge_net *bn)
2189 {
2190 struct net_device *dev = bn->netdev;
2191 struct bnge_dev *bd = bn->bd;
2192
2193 bnge_setup_msix(bn);
2194
2195 return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
2196 }
2197
/* Release all firmware-side resources: VNIC state, then the rings, then
 * the statistics contexts.
 */
static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
{
	bnge_clear_vnic(bn);
	bnge_hwrm_ring_free(bn, close_path);
	bnge_hwrm_stat_ctx_free(bn);
}
2204
/* Release every requested NQ IRQ: drop the affinity hint and its cpumask
 * first, then free the vector.  The requested flag is cleared
 * unconditionally so the table is consistent even for vectors that were
 * never requested.
 */
static void bnge_free_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	struct bnge_irq *irq;
	int i;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);

		irq = &bd->irq_tbl[map_idx];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			/* dev_id must match what request_irq() was given. */
			free_irq(irq->vector, bn->bnapi[i]);
		}

		irq->requested = 0;
	}
}
2227
/* Set up interrupt names/queue counts, then request one IRQ per NQ
 * ring and attach it to the corresponding NAPI instance.  A CPU
 * affinity hint is installed per vector, spread over the device's NUMA
 * node.  On failure all IRQs requested so far are released.
 */
static int bnge_request_irq(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, rc;

	rc = bnge_setup_interrupts(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
		return rc;
	}
	for (i = 0; i < bd->nq_nr_rings; i++) {
		int map_idx = bnge_cp_num_to_irq_num(bn, i);
		struct bnge_irq *irq = &bd->irq_tbl[map_idx];

		rc = request_irq(irq->vector, irq->handler, 0, irq->name,
				 bn->bnapi[i]);
		if (rc)
			goto err_free_irq;

		netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
		/* Mark requested only after request_irq() succeeded so the
		 * unwind path frees exactly what was acquired.
		 */
		irq->requested = 1;

		/* Mask allocation failure just skips the hint; it is not
		 * treated as an error.
		 */
		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bd->pdev->dev);

			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bn->netdev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				goto err_free_irq;
			}
		}
	}
	return 0;

err_free_irq:
	bnge_free_irq(bn);
	return rc;
}
2271
/* Program the TPA flags on every VNIC.  When disabling and FW access
 * is already gone, skip the FW calls entirely.
 */
static int bnge_set_tpa(struct bnge_net *bn, bool set_tpa)
{
	u32 tpa_flags = 0;
	int vnic_idx;

	if (set_tpa)
		tpa_flags = bn->priv_flags & BNGE_NET_EN_TPA;
	else if (BNGE_NO_FW_ACCESS(bn->bd))
		return 0;

	for (vnic_idx = 0; vnic_idx < bn->nr_vnics; vnic_idx++) {
		struct bnge_vnic_info *vnic = &bn->vnic_info[vnic_idx];
		int rc;

		rc = bnge_hwrm_vnic_set_tpa(bn->bd, vnic, tpa_flags);
		if (rc) {
			netdev_err(bn->netdev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   vnic_idx, rc);
			return rc;
		}
	}

	return 0;
}
2292
/* Program the NIC for operation: allocate stat contexts and rings,
 * set up the default VNIC (RSS/TPA as configured), install the
 * default unicast L2 filter and the RX mask.  Any failure releases
 * every FW resource allocated so far.
 */
static int bnge_init_chip(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct bnge_dev *bd = bn->bd;
	int rc;

#define BNGE_DEF_STATS_COAL_TICKS 1000000
	bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;

	rc = bnge_hwrm_stat_ctx_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_ring_alloc(bn);
	if (rc) {
		netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
	if (rc) {
		netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
		goto err_out;
	}

	rc = bnge_setup_vnic(bn, vnic);
	if (rc)
		goto err_out;

	if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bnge_hwrm_update_rss_hash_cfg(bn);

	if (bn->priv_flags & BNGE_NET_EN_TPA) {
		rc = bnge_set_tpa(bn, true);
		if (rc)
			goto err_out;
	}

	/* Filter for default vnic 0 */
	rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
	if (rc) {
		netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	/* Build the RX mask from the current netdev flags */
	vnic->rx_mask = 0;

	if (bn->netdev->flags & IFF_BROADCAST)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if (bn->netdev->flags & IFF_PROMISC)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bn->netdev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else if (bn->netdev->flags & IFF_MULTICAST) {
		u32 mask = 0;

		bnge_mc_list_updated(bn, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnge_cfg_def_vnic(bn);
	if (rc)
		goto err_out;
	return 0;

err_out:
	/* close_path == 0: this is an error unwind, not a regular close */
	bnge_hwrm_resource_free(bn, 0);
	return rc;
}
2368
bnge_init_napi(struct bnge_net * bn)2369 static void bnge_init_napi(struct bnge_net *bn)
2370 {
2371 struct bnge_dev *bd = bn->bd;
2372 struct bnge_napi *bnapi;
2373 int i;
2374
2375 for (i = 0; i < bd->nq_nr_rings; i++) {
2376 bnapi = bn->bnapi[i];
2377 netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
2378 bnge_napi_poll, bnapi->index);
2379 }
2380 }
2381
bnge_del_napi(struct bnge_net * bn)2382 static void bnge_del_napi(struct bnge_net *bn)
2383 {
2384 struct bnge_dev *bd = bn->bd;
2385 int i;
2386
2387 for (i = 0; i < bd->rx_nr_rings; i++)
2388 netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
2389 for (i = 0; i < bd->tx_nr_rings; i++)
2390 netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
2391
2392 for (i = 0; i < bd->nq_nr_rings; i++) {
2393 struct bnge_napi *bnapi = bn->bnapi[i];
2394
2395 __netif_napi_del_locked(&bnapi->napi);
2396 }
2397
2398 /* Wait for RCU grace period after removing NAPI instances */
2399 synchronize_net();
2400 }
2401
/* Initialize the datapath: NQ tree, RX/TX rings and their buffers,
 * ring groups, VNIC bookkeeping, then program the chip.
 *
 * NOTE(review): if bnge_init_chip() fails we free the ring groups but
 * NOT the RX ring pair buffers allocated earlier (err_free_ring_grps
 * returns without reaching err_free_rx_ring_pair_bufs) — confirm the
 * caller's unwind (bnge_free_core()) reclaims them, otherwise this
 * path leaks those buffers.
 */
static int bnge_init_nic(struct bnge_net *bn)
{
	int rc;

	bnge_init_nq_tree(bn);

	bnge_init_rx_rings(bn);
	rc = bnge_alloc_rx_ring_pair_bufs(bn);
	if (rc)
		return rc;

	bnge_init_tx_rings(bn);

	rc = bnge_init_ring_grps(bn);
	if (rc)
		goto err_free_rx_ring_pair_bufs;

	bnge_init_vnics(bn);

	rc = bnge_init_chip(bn);
	if (rc)
		goto err_free_ring_grps;
	return rc;

err_free_ring_grps:
	bnge_free_ring_grps(bn);
	return rc;

err_free_rx_ring_pair_bufs:
	bnge_free_rx_ring_pair_bufs(bn);
	return rc;
}
2434
bnge_tx_disable(struct bnge_net * bn)2435 static void bnge_tx_disable(struct bnge_net *bn)
2436 {
2437 struct bnge_tx_ring_info *txr;
2438 int i;
2439
2440 if (bn->tx_ring) {
2441 for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2442 txr = &bn->tx_ring[i];
2443 WRITE_ONCE(txr->dev_state, BNGE_DEV_STATE_CLOSING);
2444 }
2445 }
2446 /* Make sure napi polls see @dev_state change */
2447 synchronize_net();
2448
2449 if (!bn->netdev)
2450 return;
2451 /* Drop carrier first to prevent TX timeout */
2452 netif_carrier_off(bn->netdev);
2453 /* Stop all TX queues */
2454 netif_tx_disable(bn->netdev);
2455 }
2456
bnge_tx_enable(struct bnge_net * bn)2457 static void bnge_tx_enable(struct bnge_net *bn)
2458 {
2459 struct bnge_tx_ring_info *txr;
2460 int i;
2461
2462 for (i = 0; i < bn->bd->tx_nr_rings; i++) {
2463 txr = &bn->tx_ring[i];
2464 WRITE_ONCE(txr->dev_state, 0);
2465 }
2466 /* Make sure napi polls see @dev_state change */
2467 synchronize_net();
2468 netif_tx_wake_all_queues(bn->netdev);
2469 }
2470
/* Core open path: reserve and allocate resources, set up NAPI and
 * IRQs, program the NIC, then enable NAPI, interrupts and TX.
 * Failures unwind in reverse acquisition order.
 */
static int bnge_open_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int rc;

	netif_carrier_off(bn->netdev);

	rc = bnge_reserve_rings(bd);
	if (rc) {
		netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
		return rc;
	}

	rc = bnge_alloc_core(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
		return rc;
	}

	bnge_init_napi(bn);
	rc = bnge_request_irq(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
		goto err_del_napi;
	}

	rc = bnge_init_nic(bn);
	if (rc) {
		netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
		goto err_free_irq;
	}

	bnge_enable_napi(bn);

	/* Advertise the open state before enabling interrupts/TX */
	set_bit(BNGE_STATE_OPEN, &bd->state);

	bnge_enable_int(bn);

	bnge_tx_enable(bn);
	return 0;

err_free_irq:
	bnge_free_irq(bn);
err_del_napi:
	bnge_del_napi(bn);
	/* Both error paths release the core allocations made above */
	bnge_free_core(bn);
	return rc;
}
2519
/* ndo_open handler: bring the device fully up. */
static int bnge_open(struct net_device *dev)
{
	int rc;

	rc = bnge_open_core(netdev_priv(dev));
	if (rc)
		netdev_err(dev, "bnge_open_core err: %d\n", rc);

	return rc;
}
2531
/* Release FW-side resources for a regular close (close_path == 1,
 * unlike the error unwind in bnge_init_chip() which passes 0).
 */
static int bnge_shutdown_nic(struct bnge_net *bn)
{
	bnge_hwrm_resource_free(bn, 1);
	return 0;
}
2537
/* Core close path: mirror of bnge_open_core(), tearing down in the
 * reverse order of bring-up.
 */
static void bnge_close_core(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;

	/* Stop TX first so no new work enters while tearing down */
	bnge_tx_disable(bn);

	clear_bit(BNGE_STATE_OPEN, &bd->state);
	bnge_shutdown_nic(bn);
	bnge_disable_napi(bn);
	bnge_free_all_rings_bufs(bn);
	bnge_free_irq(bn);
	bnge_del_napi(bn);

	bnge_free_core(bn);
}
2553
/* ndo_stop handler: tear the device down. */
static int bnge_close(struct net_device *dev)
{
	bnge_close_core(netdev_priv(dev));

	return 0;
}
2562
/* Net-core callbacks implemented by this driver. */
static const struct net_device_ops bnge_netdev_ops = {
	.ndo_open		= bnge_open,
	.ndo_stop		= bnge_close,
	.ndo_start_xmit		= bnge_start_xmit,
	.ndo_features_check	= bnge_features_check,
};
2569
/* Set the netdev hardware address from the PF's MAC address. */
static void bnge_init_mac_addr(struct bnge_dev *bd)
{
	eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
}
2574
bnge_set_tpa_flags(struct bnge_dev * bd)2575 static void bnge_set_tpa_flags(struct bnge_dev *bd)
2576 {
2577 struct bnge_net *bn = netdev_priv(bd->netdev);
2578
2579 bn->priv_flags &= ~BNGE_NET_EN_TPA;
2580
2581 if (bd->netdev->features & NETIF_F_LRO)
2582 bn->priv_flags |= BNGE_NET_EN_LRO;
2583 else if (bd->netdev->features & NETIF_F_GRO_HW)
2584 bn->priv_flags |= BNGE_NET_EN_GRO;
2585 }
2586
bnge_init_l2_fltr_tbl(struct bnge_net * bn)2587 static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
2588 {
2589 int i;
2590
2591 for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
2592 INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
2593 get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
2594 }
2595
/* Compute all ring geometry from the current MTU and ring-size
 * tunables: RX buffer sizes, aggregation ring sizing, page counts and
 * ring masks for RX/TX, and the completion ring size derived from the
 * worst-case completion load.
 */
void bnge_set_ring_params(struct bnge_dev *bd)
{
	struct bnge_net *bn = netdev_priv(bd->netdev);
	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ring_size = bn->rx_ring_size;
	bn->rx_agg_ring_size = 0;
	bn->rx_agg_nr_pages = 0;

	if (bn->priv_flags & BNGE_NET_EN_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);

	/* A buffer that no longer fits in one page forces jumbo mode */
	bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
	if (rx_space > PAGE_SIZE) {
		u32 jumbo_factor;

		bn->priv_flags |= BNGE_NET_EN_JUMBO;
		jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	if (agg_factor) {
		if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
			ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
			netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
				    bn->rx_ring_size, ring_size);
			bn->rx_ring_size = ring_size;
		}
		agg_ring_size = ring_size * agg_factor;

		bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
							  RX_DESC_CNT);
		if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bn->rx_agg_ring_size = agg_ring_size;
		bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;

		/* With an agg ring, the header buffer only needs to hold up
		 * to the copybreak / header-data-split threshold.
		 */
		rx_size = max3(BNGE_DEFAULT_RX_COPYBREAK,
			       bn->rx_copybreak,
			       bn->netdev->cfg_pending->hds_thresh);
		rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bn->rx_buf_use_size = rx_size;
	bn->rx_buf_size = rx_space;

	bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
	bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bn->tx_ring_size;
	bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
	bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;

	/* Size the completion ring for the worst case: two completions
	 * per RX (plus TPA headroom), plus agg and TX completions.
	 */
	max_rx_cmpl = bn->rx_ring_size;

	if (bn->priv_flags & BNGE_NET_EN_TPA)
		max_rx_cmpl += bd->max_tpa_v2;
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
	bn->cp_ring_size = ring_size;

	bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
	if (bn->cp_nr_pages > MAX_CP_PAGES) {
		bn->cp_nr_pages = MAX_CP_PAGES;
		bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
			    ring_size, bn->cp_ring_size);
	}
	bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
	bn->cp_ring_mask = bn->cp_bit - 1;
}
2681
bnge_init_ring_params(struct bnge_net * bn)2682 static void bnge_init_ring_params(struct bnge_net *bn)
2683 {
2684 u32 rx_size;
2685
2686 bn->rx_copybreak = BNGE_DEFAULT_RX_COPYBREAK;
2687 /* Try to fit 4 chunks into a 4k page */
2688 rx_size = SZ_1K -
2689 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2690 bn->netdev->cfg->hds_thresh = max(BNGE_DEFAULT_RX_COPYBREAK, rx_size);
2691 }
2692
/* Allocate and register the net device for this adapter: set up ops,
 * MTU limits, feature flags, default ring parameters, the L2 filter
 * table and the MAC address.  On registration failure the netdev is
 * freed again.  Returns 0 or a negative errno.
 */
int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
{
	struct net_device *netdev;
	struct bnge_net *bn;
	int rc;

	netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
				    max_irqs);
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, bd->dev);
	bd->netdev = netdev;

	netdev->netdev_ops = &bnge_netdev_ops;

	bnge_set_ethtool_ops(netdev);

	bn = netdev_priv(netdev);
	bn->netdev = netdev;
	bn->bd = bd;

	netdev->min_mtu = ETH_ZLEN;
	netdev->max_mtu = bd->max_mtu;

	/* User-toggleable features */
	netdev->hw_features = NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GSO_UDP_TUNNEL |
			      NETIF_F_GSO_GRE |
			      NETIF_F_GSO_IPXIP4 |
			      NETIF_F_GSO_UDP_TUNNEL_CSUM |
			      NETIF_F_GSO_GRE_CSUM |
			      NETIF_F_GSO_PARTIAL |
			      NETIF_F_RXHASH |
			      NETIF_F_RXCSUM |
			      NETIF_F_GRO;

	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
		netdev->hw_features |= NETIF_F_GSO_UDP_L4;

	if (BNGE_SUPPORTS_TPA(bd))
		netdev->hw_features |= NETIF_F_LRO;

	/* Features usable on encapsulated (tunneled) packets */
	netdev->hw_enc_features = NETIF_F_IP_CSUM |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_GSO_GRE |
				  NETIF_F_GSO_UDP_TUNNEL_CSUM |
				  NETIF_F_GSO_GRE_CSUM |
				  NETIF_F_GSO_IPXIP4 |
				  NETIF_F_GSO_PARTIAL;

	if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;

	netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				       NETIF_F_GSO_GRE_CSUM;

	netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
	if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
	if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
		netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;

	if (BNGE_SUPPORTS_TPA(bd))
		netdev->hw_features |= NETIF_F_GRO_HW;

	netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;

	/* Hardware GRO and LRO are mutually exclusive */
	if (netdev->features & NETIF_F_GRO_HW)
		netdev->features &= ~NETIF_F_LRO;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
	if (bd->tso_max_segs)
		netif_set_tso_max_segs(netdev, bd->tso_max_segs);

	bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
	bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
	bn->rx_dir = DMA_FROM_DEVICE;

	bnge_set_tpa_flags(bd);
	bnge_init_ring_params(bn);
	bnge_set_ring_params(bd);

	bnge_init_l2_fltr_tbl(bn);
	bnge_init_mac_addr(bd);

	netdev->request_ops_lock = true;
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
		goto err_netdev;
	}

	return 0;

err_netdev:
	free_netdev(netdev);
	return rc;
}
2801
bnge_netdev_free(struct bnge_dev * bd)2802 void bnge_netdev_free(struct bnge_dev *bd)
2803 {
2804 struct net_device *netdev = bd->netdev;
2805
2806 unregister_netdev(netdev);
2807 free_netdev(netdev);
2808 bd->netdev = NULL;
2809 }
2810