// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/netdev_queues.h>

#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_txrx.h"

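/* Bring-up helper for ndo_open; non-static, so presumably also used by
 * the queue management/reconfiguration path. Ordering matters: NAPI
 * vectors and ring resources must exist before queues are exposed to
 * the stack, and firmware ownership must be claimed before time sync,
 * heartbeat, and the MAC IRQ are started. Error labels unwind in
 * reverse order.
 */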
int __fbnic_open(struct fbnic_net *fbn)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	err = fbnic_alloc_napi_vectors(fbn);
	if (err)
		return err;

	err = fbnic_alloc_resources(fbn);
	if (err)
		goto free_napi_vectors;

	err = fbnic_set_netif_queues(fbn);
	if (err)
		goto free_resources;

	/* Send ownership message and flush to verify FW has seen it */
	err = fbnic_fw_xmit_ownership_msg(fbd, true);
	if (err) {
		dev_warn(fbd->dev,
			 "Error %d sending host ownership message to the firmware\n",
			 err);
		goto err_reset_queues;
	}

	err = fbnic_time_start(fbn);
	if (err)
		goto release_ownership;

	err = fbnic_fw_init_heartbeat(fbd, false);
	if (err)
		goto time_stop;

	err = fbnic_mac_request_irq(fbd);
	if (err)
		goto time_stop;

	/* Pull the BMC config and initialize the RPC */
	fbnic_bmc_rpc_init(fbd);
	fbnic_rss_reinit(fbd, fbn);

	phylink_resume(fbn->phylink);

	return 0;
time_stop:
	fbnic_time_stop(fbn);
release_ownership:
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
err_reset_queues:
	fbnic_reset_netif_queues(fbn);
free_resources:
	fbnic_free_resources(fbn);
free_napi_vectors:
	fbnic_free_napi_vectors(fbn);
	return err;
}

static int fbnic_open(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int err;

	fbnic_napi_name_irqs(fbn->fbd);

	err = __fbnic_open(fbn);
	if (!err)
		fbnic_up(fbn);

	return err;
}

static int fbnic_stop(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	fbnic_mac_free_irq(fbn->fbd);
	phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));

	fbnic_down(fbn);

	fbnic_time_stop(fbn);
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);

	fbnic_reset_netif_queues(fbn);
	fbnic_free_resources(fbn);
	fbnic_free_napi_vectors(fbn);

	return 0;
}

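/* The sync/unsync callbacks below are invoked through __hw_addr_sync_dev()
 * from __fbnic_set_rx_mode(). A -ENOSPC return is not fatal; the caller
 * falls back to promiscuous filtering instead.
 */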
static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_addr *avail_addr;

	if (WARN_ON(!is_valid_ether_addr(addr)))
		return -EADDRNOTAVAIL;

	avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
	if (!avail_addr)
		return -ENOSPC;

	/* Add type flag indicating this address is in use by the host */
	set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);

	return 0;
}

static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i, ret;

	/* Scan from middle of list to bottom, filling bottom up.
	 * Skip the first entry which is reserved for dev_addr and
	 * leave the last entry to use for promiscuous filtering.
	 */
	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
	     i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (!ether_addr_equal(mac_addr->value.addr8, addr))
			continue;

		ret = __fbnic_uc_unsync(mac_addr);
	}

	return ret;
}

static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_addr *avail_addr;

	if (WARN_ON(!is_multicast_ether_addr(addr)))
		return -EADDRNOTAVAIL;

	avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
	if (!avail_addr)
		return -ENOSPC;

	/* Add type flag indicating this address is in use by the host */
	set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);

	return 0;
}

static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i, ret;

	/* Scan from middle of list to top, filling top down.
	 * Skip over the address reserved for the BMC MAC and
	 * exclude index 0 as that belongs to the broadcast address.
	 */
	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
	     --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (!ether_addr_equal(mac_addr->value.addr8, addr))
			continue;

		ret = __fbnic_mc_unsync(mac_addr);
	}

	return ret;
}

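/* Rebuild the full MACDA filter state: host address, broadcast, the
 * synced unicast/multicast lists, promiscuous fallback, and BMC-shared
 * rules, then write the result to hardware.
 */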
void __fbnic_set_rx_mode(struct fbnic_dev *fbd,
			 struct netdev_hw_addr_list *uc,
			 struct netdev_hw_addr_list *mc)
{
	bool uc_promisc = false, mc_promisc = false;
	struct net_device *netdev = fbd->netdev;
	struct fbnic_mac_addr *mac_addr;
	int err;

	/* Populate host address from dev_addr */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
	if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
	    mac_addr->state != FBNIC_TCAM_S_VALID) {
		ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
		mac_addr->state = FBNIC_TCAM_S_UPDATE;
		set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
	}

	/* Populate broadcast address if broadcast is enabled */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
	if (netdev->flags & IFF_BROADCAST) {
		if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_broadcast_addr(mac_addr->value.addr8);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
		set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
	}

	/* Synchronize unicast and multicast address lists */
	err = __hw_addr_sync_dev(uc, netdev, fbnic_uc_sync, fbnic_uc_unsync);
	if (err == -ENOSPC)
		uc_promisc = true;
	err = __hw_addr_sync_dev(mc, netdev, fbnic_mc_sync, fbnic_mc_unsync);
	if (err == -ENOSPC)
		mc_promisc = true;

	uc_promisc |= !!(netdev->flags & IFF_PROMISC);
	mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;

	/* Update the promiscuous rules */
	fbnic_promisc_sync(fbd, uc_promisc, mc_promisc);

	/* Add rules for BMC all multicast if it is enabled */
	fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);

	/* Sift out any unshared BMC rules and place them in BMC only section */
	fbnic_sift_macda(fbd);

	/* Write updates to hardware */
	fbnic_write_rules(fbd);
	fbnic_write_macda(fbd);
	fbnic_write_tce_tcam(fbd);
}

static void fbnic_set_rx_mode(struct net_device *netdev,
			      struct netdev_hw_addr_list *uc,
			      struct netdev_hw_addr_list *mc)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;

	/* No need to update the hardware if we are not running */
	if (netif_running(netdev))
		__fbnic_set_rx_mode(fbd, uc, mc);
}

static int fbnic_set_mac(struct net_device *netdev, void *p)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	if (netif_running(netdev))
		__fbnic_set_rx_mode(fbn->fbd, &netdev->uc, &netdev->mc);

	return 0;
}

static int fbnic_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fbnic_net *fbn = netdev_priv(dev);

	if (fbnic_check_split_frames(fbn->xdp_prog, new_mtu, fbn->hds_thresh)) {
		dev_err(&dev->dev,
			"MTU %d is larger than HDS threshold %d in XDP mode\n",
			new_mtu, fbn->hds_thresh);

		return -EINVAL;
	}

	WRITE_ONCE(dev->mtu, new_mtu);

	return 0;
}

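/* Drop all host-owned filter state. Only the host flag range is
 * cleared, so entries still referenced elsewhere (e.g. by the BMC)
 * keep their remaining act_tcam bits and survive the delete pass.
 */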
void fbnic_clear_rx_mode(struct fbnic_dev *fbd)
{
	struct net_device *netdev = fbd->netdev;
	int idx;

	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];

		if (mac_addr->state != FBNIC_TCAM_S_VALID)
			continue;

		bitmap_clear(mac_addr->act_tcam,
			     FBNIC_MAC_ADDR_T_HOST_START,
			     FBNIC_MAC_ADDR_T_HOST_LEN);

		if (bitmap_empty(mac_addr->act_tcam,
				 FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
			mac_addr->state = FBNIC_TCAM_S_DELETE;
	}

	/* Write updates to hardware */
	fbnic_write_macda(fbd);

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

static int fbnic_hwtstamp_get(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	*config = fbn->hwtstamp_config;

	return 0;
}

static int fbnic_hwtstamp_set(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int old_rx_filter;

	if (config->source != HWTSTAMP_SOURCE_NETDEV)
		return -EOPNOTSUPP;

	if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
		return 0;

	/* Upscale the filters */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		break;
	case HWTSTAMP_FILTER_NTP_ALL:
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* Configure */
	old_rx_filter = fbn->hwtstamp_config.rx_filter;
	memcpy(&fbn->hwtstamp_config, config, sizeof(*config));

	if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	/* Save / report back filter configuration
	 * Note that our filter configuration is inexact. Instead of
	 * filtering for a specific UDP port or L2 Ethertype we are
	 * filtering in all UDP or all non-IP packets for timestamping. So
	 * if anything other than FILTER_ALL is requested we report
	 * FILTER_SOME indicating that we will be timestamping a few
	 * additional packets.
	 */
	if (config->rx_filter > HWTSTAMP_FILTER_ALL)
		config->rx_filter = HWTSTAMP_FILTER_SOME;

	return 0;
}

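/* ndo_get_stats64: combine software totals from fbn with per-ring
 * counters (sampled under their u64_stats seqcounts) and hardware
 * drop/error counters (read under hw_stats.lock).
 */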
static void fbnic_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats64)
{
	u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
	u64 rx_over = 0, rx_missed = 0, rx_length = 0;
	u64 tx_bytes, tx_packets, tx_dropped = 0;
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_queue_stats *stats;
	unsigned int start, i;

	fbnic_get_hw_stats(fbd);

	stats = &fbn->tx_stats;

	tx_bytes = stats->bytes;
	tx_packets = stats->packets;
	tx_dropped = stats->dropped;

	/* Record drops from Tx HW Datapath */
	spin_lock(&fbd->hw_stats.lock);
	tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
		      fbd->hw_stats.tti.cm_drop.frames.value +
		      fbd->hw_stats.tti.frame_drop.frames.value +
		      fbd->hw_stats.tti.tbi_drop.frames.value;
	spin_unlock(&fbd->hw_stats.lock);

	stats64->tx_bytes = tx_bytes;
	stats64->tx_packets = tx_packets;
	stats64->tx_dropped = tx_dropped;

	for (i = 0; i < fbn->num_tx_queues; i++) {
		struct fbnic_ring *txr = fbn->tx[i];

		if (!txr)
			continue;

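		/* Per-ring counters use u64_stats; retry the read if a
		 * writer updated the ring while we were sampling.
		 */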
		stats = &txr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_bytes = stats->bytes;
			tx_packets = stats->packets;
			tx_dropped = stats->dropped;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		stats64->tx_bytes += tx_bytes;
		stats64->tx_packets += tx_packets;
		stats64->tx_dropped += tx_dropped;
	}

	stats = &fbn->rx_stats;

	rx_bytes = stats->bytes;
	rx_packets = stats->packets;
	rx_dropped = stats->dropped;

	spin_lock(&fbd->hw_stats.lock);
	/* Record drops for the host FIFOs.
	 * 4: network to Host, 6: BMC to Host
	 * Exclude the BMC and MC FIFOs as those stats may contain drops
	 * due to unrelated items such as TCAM misses. They are still
	 * accessible through the ethtool stats.
	 */
	i = FBNIC_RXB_FIFO_HOST;
	rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
	i = FBNIC_RXB_FIFO_BMC_TO_HOST;
	rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;

	for (i = 0; i < fbd->max_num_queues; i++) {
		/* Report packets dropped due to CQ/BDQ being full/empty */
		rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
		rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;

		/* Report packets with errors */
		rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
	}
	spin_unlock(&fbd->hw_stats.lock);

	stats64->rx_bytes = rx_bytes;
	stats64->rx_packets = rx_packets;
	stats64->rx_dropped = rx_dropped;
	stats64->rx_over_errors = rx_over;
	stats64->rx_errors = rx_errors;
	stats64->rx_missed_errors = rx_missed;

	for (i = 0; i < fbn->num_rx_queues; i++) {
		struct fbnic_ring *xdpr = fbn->tx[FBNIC_MAX_TXQS + i];
		struct fbnic_ring *rxr = fbn->rx[i];

		if (!rxr)
			continue;

		stats = &rxr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			rx_bytes = stats->bytes;
			rx_packets = stats->packets;
			rx_dropped = stats->dropped;
			rx_length = stats->rx.length_errors;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		stats64->rx_bytes += rx_bytes;
		stats64->rx_packets += rx_packets;
		stats64->rx_dropped += rx_dropped;
		stats64->rx_errors += rx_length;
		stats64->rx_length_errors += rx_length;

		if (!xdpr)
			continue;

		stats = &xdpr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_bytes = stats->bytes;
			tx_packets = stats->packets;
			tx_dropped = stats->dropped;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		stats64->tx_bytes += tx_bytes;
		stats64->tx_packets += tx_packets;
		stats64->tx_dropped += tx_dropped;
	}
}

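/* True if the given XDP program cannot handle the MTU/HDS combination:
 * without fragment support, each frame (MTU plus Ethernet header) must
 * fit entirely within the single head buffer.
 */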
bool fbnic_check_split_frames(struct bpf_prog *prog, unsigned int mtu,
			      u32 hds_thresh)
{
	if (!prog)
		return false;

	if (prog->aux->xdp_has_frags)
		return false;

	return mtu + ETH_HLEN > hds_thresh;
}

static int fbnic_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog, *prev_prog;
	struct fbnic_net *fbn = netdev_priv(netdev);

	if (bpf->command != XDP_SETUP_PROG)
		return -EINVAL;

	if (fbnic_check_split_frames(prog, netdev->mtu,
				     fbn->hds_thresh)) {
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "MTU too high, or HDS threshold is too low for single buffer XDP");
		return -EOPNOTSUPP;
	}

	prev_prog = xchg(&fbn->xdp_prog, prog);
	if (prev_prog)
		bpf_prog_put(prev_prog);

	return 0;
}

static const struct net_device_ops fbnic_netdev_ops = {
	.ndo_open		= fbnic_open,
	.ndo_stop		= fbnic_stop,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fbnic_xmit_frame,
	.ndo_features_check	= fbnic_features_check,
	.ndo_set_mac_address	= fbnic_set_mac,
	.ndo_change_mtu		= fbnic_change_mtu,
	.ndo_set_rx_mode_async	= fbnic_set_rx_mode,
	.ndo_get_stats64	= fbnic_get_stats64,
	.ndo_bpf		= fbnic_bpf,
	.ndo_hwtstamp_get	= fbnic_hwtstamp_get,
	.ndo_hwtstamp_set	= fbnic_hwtstamp_set,
};

static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_rx *rx)
{
	u64 bytes, packets, alloc_fail, alloc_fail_bdq;
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *rxr = fbn->rx[idx];
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_queue_stats *stats;
	u64 csum_complete, csum_none;
	struct fbnic_q_triad *qt;
	unsigned int start;

	if (!rxr)
		return;

	/* fbn->rx points to completion queues */
	qt = container_of(rxr, struct fbnic_q_triad, cmpl);

	stats = &rxr->stats;
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
		packets = stats->packets;
		alloc_fail = stats->rx.alloc_failed;
		csum_complete = stats->rx.csum_complete;
		csum_none = stats->rx.csum_none;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

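	/* Fold in allocation failures from both submission rings of the
	 * triad; sub0/sub1 are presumably the header and payload BDQs.
	 */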
	stats = &qt->sub0.stats;
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		alloc_fail_bdq = stats->bdq.alloc_failed;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
	alloc_fail += alloc_fail_bdq;

	stats = &qt->sub1.stats;
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		alloc_fail_bdq = stats->bdq.alloc_failed;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
	alloc_fail += alloc_fail_bdq;

	rx->bytes = bytes;
	rx->packets = packets;
	rx->alloc_fail = alloc_fail;
	rx->csum_complete = csum_complete;
	rx->csum_none = csum_none;

	fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);

	spin_lock(&fbd->hw_stats.lock);
	rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
			       fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
	rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
		       rx->hw_drop_overruns;
	spin_unlock(&fbd->hw_stats.lock);
}

static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_tx *tx)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *txr = fbn->tx[idx];
	struct fbnic_queue_stats *stats;
	u64 stop, wake, csum, lso;
	struct fbnic_ring *xdpr;
	unsigned int start;
	u64 bytes, packets;

	if (!txr)
		return;

	stats = &txr->stats;
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
		packets = stats->packets;
		csum = stats->twq.csum_partial;
		lso = stats->twq.lso;
		stop = stats->twq.stop;
		wake = stats->twq.wake;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	tx->bytes = bytes;
	tx->packets = packets;
	tx->needs_csum = csum + lso;
	tx->hw_gso_wire_packets = lso;
	tx->stop = stop;
	tx->wake = wake;

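	/* XDP Tx rings are stored after the regular Tx rings in fbn->tx */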
	xdpr = fbn->tx[FBNIC_MAX_TXQS + idx];
	if (xdpr) {
		stats = &xdpr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			bytes = stats->bytes;
			packets = stats->packets;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		tx->bytes += bytes;
		tx->packets += packets;
	}
}

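/* Base stats report the accumulated totals kept in fbn itself; these
 * presumably preserve counts from rings that have since been resized
 * or freed, which per-queue reads can no longer see.
 */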
static void fbnic_get_base_stats(struct net_device *dev,
				 struct netdev_queue_stats_rx *rx,
				 struct netdev_queue_stats_tx *tx)
{
	struct fbnic_net *fbn = netdev_priv(dev);

	tx->bytes = fbn->tx_stats.bytes;
	tx->packets = fbn->tx_stats.packets;
	tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
	tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
	tx->stop = fbn->tx_stats.twq.stop;
	tx->wake = fbn->tx_stats.twq.wake;

	rx->bytes = fbn->rx_stats.bytes;
	rx->packets = fbn->rx_stats.packets;
	rx->alloc_fail = fbn->rx_stats.rx.alloc_failed +
			 fbn->bdq_stats.bdq.alloc_failed;
	rx->csum_complete = fbn->rx_stats.rx.csum_complete;
	rx->csum_none = fbn->rx_stats.rx.csum_none;
}

static const struct netdev_stat_ops fbnic_stat_ops = {
	.get_queue_stats_rx	= fbnic_get_queue_stats_rx,
	.get_queue_stats_tx	= fbnic_get_queue_stats_tx,
	.get_base_stats		= fbnic_get_base_stats,
};

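/* Clamp the requested Tx/Rx queue counts to the NAPI vectors available
 * after reserving the non-NAPI IRQs (such as the firmware mailbox).
 */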
void fbnic_reset_queues(struct fbnic_net *fbn,
			unsigned int tx, unsigned int rx)
{
	struct fbnic_dev *fbd = fbn->fbd;
	unsigned int max_napis;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;

	tx = min(tx, max_napis);
	fbn->num_tx_queues = tx;

	rx = min(rx, max_napis);
	fbn->num_rx_queues = rx;

	fbn->num_napi = max(tx, rx);
}

/**
 * fbnic_netdev_free - Free the netdev associated with fbnic
 * @fbd: Driver specific structure to free netdev from
 *
 * Destroy the phylink instance and free the netdev, breaking the
 * binding between the hardware, netdev, and pci data structures.
 **/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
	fbnic_phylink_destroy(fbd->netdev);

	free_netdev(fbd->netdev);
	fbd->netdev = NULL;
}

/**
 * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
 * @fbd: Driver specific structure to associate netdev with
 *
 * Allocate and initialize the netdev and netdev private structure. Bind
 * together the hardware, netdev, and pci data structures.
 *
 * Return: Pointer to net_device on success, NULL on failure
 **/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
	struct net_device *netdev;
	struct fbnic_net *fbn;
	int default_queues;

	netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
	if (!netdev)
		return NULL;

	SET_NETDEV_DEV(netdev, fbd->dev);
	fbd->netdev = netdev;

	netdev->netdev_ops = &fbnic_netdev_ops;
	netdev->stat_ops = &fbnic_stat_ops;
	netdev->queue_mgmt_ops = &fbnic_queue_mgmt_ops;
	netdev->netmem_tx = true;

	fbnic_set_ethtool_ops(netdev);

	fbn = netdev_priv(netdev);

	fbn->netdev = netdev;
	fbn->fbd = fbd;

	fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
	fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
	fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
	fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;

	fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
	fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
	fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;

	/* Initialize the hds_thresh */
	netdev->cfg->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
	fbn->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;

	default_queues = netif_get_num_default_rss_queues();
	if (default_queues > fbd->max_num_queues)
		default_queues = fbd->max_num_queues;

	fbnic_reset_queues(fbn, default_queues, default_queues);

	fbnic_reset_indir_tbl(fbn);
	fbnic_rss_key_fill(fbn->rss_key);
	fbnic_rss_init_en_mask(fbn);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->gso_partial_features =
		NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM |
		NETIF_F_GSO_IPXIP4 |
		NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->features |=
		netdev->gso_partial_features |
		FBNIC_TUN_GSO_FEATURES |
		NETIF_F_RXHASH |
		NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM |
		NETIF_F_TSO |
		NETIF_F_TSO_ECN |
		NETIF_F_TSO6 |
		NETIF_F_GSO_PARTIAL |
		NETIF_F_GSO_UDP_L4;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
	netdev->hw_enc_features |= netdev->features;
	netdev->features |= NETIF_F_NTUPLE;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_RX_SG;

	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

	/* TBD: This is a workaround for BMC as phylink doesn't have support
	 * for leaving the link enabled if a BMC is present.
	 */
	netdev->ethtool->wol_enabled = true;

	netif_carrier_off(netdev);

	netif_tx_stop_all_queues(netdev);

	if (fbnic_phylink_create(netdev)) {
		free_netdev(netdev);
		fbd->netdev = NULL;
		return NULL;
	}

	return netdev;
}

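/* Derive the MAC address from the 64-bit PCIe Device Serial Number:
 * keep the upper three bytes (OUI) and the lower three, skipping the
 * two middle bytes (presumably EUI-64 style filler between the OUI
 * and the NIC-specific portion).
 */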
static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
{
	addr[0] = (dsn >> 56) & 0xFF;
	addr[1] = (dsn >> 48) & 0xFF;
	addr[2] = (dsn >> 40) & 0xFF;
	addr[3] = (dsn >> 16) & 0xFF;
	addr[4] = (dsn >> 8) & 0xFF;
	addr[5] = dsn & 0xFF;

	return is_valid_ether_addr(addr) ? 0 : -EINVAL;
}

/**
 * fbnic_netdev_register - Initialize general software structures
 * @netdev: Netdev containing structure to initialize and register
 *
 * Initialize the MAC address for the netdev and register it.
 *
 * Return: 0 on success, negative on failure
 **/
int fbnic_netdev_register(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	u64 dsn = fbd->dsn;
	u8 addr[ETH_ALEN];
	int err;

	err = fbnic_dsn_to_mac_addr(dsn, addr);
	if (!err) {
		ether_addr_copy(netdev->perm_addr, addr);
		eth_hw_addr_set(netdev, addr);
	} else {
		/* A randomly assigned MAC address will cause provisioning
		 * issues so instead just fail to spawn the netdev and
		 * avoid any confusion.
		 */
		dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
		return err;
	}

	return register_netdev(netdev);
}

void fbnic_netdev_unregister(struct net_device *netdev)
{
	unregister_netdev(netdev);
}