// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/netdev_queues.h>

#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_txrx.h"

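/* Bring up NAPI vectors, ring resources, and firmware state for the
 * interface. On any failure, unwind in the reverse order of setup.
 */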
int __fbnic_open(struct fbnic_net *fbn)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	err = fbnic_alloc_napi_vectors(fbn);
	if (err)
		return err;

	err = fbnic_alloc_resources(fbn);
	if (err)
		goto free_napi_vectors;

	err = fbnic_set_netif_queues(fbn);
	if (err)
		goto free_resources;

	/* Send ownership message and flush to verify FW has seen it */
	err = fbnic_fw_xmit_ownership_msg(fbd, true);
	if (err) {
		dev_warn(fbd->dev,
			 "Error %d sending host ownership message to the firmware\n",
			 err);
		goto err_reset_queues;
	}

	err = fbnic_time_start(fbn);
	if (err)
		goto release_ownership;

	err = fbnic_fw_init_heartbeat(fbd, false);
	if (err)
		goto time_stop;

	err = fbnic_pcs_request_irq(fbd);
	if (err)
		goto time_stop;

	/* Pull the BMC config and initialize the RPC */
	fbnic_bmc_rpc_init(fbd);
	fbnic_rss_reinit(fbd, fbn);

	return 0;
time_stop:
	fbnic_time_stop(fbn);
release_ownership:
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
err_reset_queues:
	fbnic_reset_netif_queues(fbn);
free_resources:
	fbnic_free_resources(fbn);
free_napi_vectors:
	fbnic_free_napi_vectors(fbn);
	return err;
}

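/* ndo_open callback: name the IRQs, then run the shared open path and
 * start the datapath if it succeeded.
 */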
static int fbnic_open(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int err;

	fbnic_napi_name_irqs(fbn->fbd);

	err = __fbnic_open(fbn);
	if (!err)
		fbnic_up(fbn);

	return err;
}

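/* ndo_stop callback: tear down in the reverse order of fbnic_open(),
 * releasing firmware ownership before freeing queue resources.
 */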
static int fbnic_stop(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	fbnic_down(fbn);
	fbnic_pcs_free_irq(fbn->fbd);

	fbnic_time_stop(fbn);
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);

	fbnic_reset_netif_queues(fbn);
	fbnic_free_resources(fbn);
	fbnic_free_napi_vectors(fbn);

	return 0;
}

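/* Sync callback passed to __dev_uc_sync(): reserve a MACDA TCAM entry
 * for a new unicast address, or return -ENOSPC so the caller can fall
 * back to promiscuous mode.
 */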
static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_addr *avail_addr;

	if (WARN_ON(!is_valid_ether_addr(addr)))
		return -EADDRNOTAVAIL;

	avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
	if (!avail_addr)
		return -ENOSPC;

	/* Add type flag indicating this address is in use by the host */
	set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);

	return 0;
}

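/* Unsync callback passed to __dev_uc_sync(): find the TCAM entry
 * holding the departing unicast address and release it.
 */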
static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i, ret;

	/* Scan from middle of list to bottom, filling bottom up.
	 * Skip the first entry which is reserved for dev_addr and
	 * leave the last entry to use for promiscuous filtering.
	 */
	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
	     i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (!ether_addr_equal(mac_addr->value.addr8, addr))
			continue;

		ret = __fbnic_uc_unsync(mac_addr);
	}

	return ret;
}

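/* Sync callback passed to __dev_mc_sync(): reserve a MACDA TCAM entry
 * for a new multicast address, or return -ENOSPC so the caller can
 * fall back to all-multicast mode.
 */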
static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_addr *avail_addr;

	if (WARN_ON(!is_multicast_ether_addr(addr)))
		return -EADDRNOTAVAIL;

	avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
	if (!avail_addr)
		return -ENOSPC;

	/* Add type flag indicating this address is in use by the host */
	set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);

	return 0;
}

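/* Unsync callback passed to __dev_mc_sync(): find the TCAM entry
 * holding the departing multicast address and release it.
 */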
static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i, ret;

	/* Scan from middle of list to top, filling top down.
	 * Skip over the address reserved for the BMC MAC and
	 * exclude index 0 as that belongs to the broadcast address
	 */
	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
	     --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (!ether_addr_equal(mac_addr->value.addr8, addr))
			continue;

		ret = __fbnic_mc_unsync(mac_addr);
	}

	return ret;
}

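/* Rebuild the MACDA TCAM from netdev state: the host and broadcast
 * addresses, the synced unicast/multicast lists, and a promiscuous or
 * all-multicast catch-all entry when the lists overflow or the
 * interface flags request it.
 */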
void __fbnic_set_rx_mode(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	bool uc_promisc = false, mc_promisc = false;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_mac_addr *mac_addr;
	int err;

	/* Populate host address from dev_addr */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
	if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
	    mac_addr->state != FBNIC_TCAM_S_VALID) {
		ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
		mac_addr->state = FBNIC_TCAM_S_UPDATE;
		set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
	}

	/* Populate broadcast address if broadcast is enabled */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
	if (netdev->flags & IFF_BROADCAST) {
		if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_broadcast_addr(mac_addr->value.addr8);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
		set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
	}

	/* Synchronize unicast and multicast address lists */
	err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
	if (err == -ENOSPC)
		uc_promisc = true;
	err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
	if (err == -ENOSPC)
		mc_promisc = true;

	uc_promisc |= !!(netdev->flags & IFF_PROMISC);
	mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;

	/* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
	if (uc_promisc) {
		if (!is_zero_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			set_bit(FBNIC_MAC_ADDR_T_PROMISC,
				mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mc_promisc &&
		   (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
		/* Multicast needs special handling as the BMC may already
		 * have an all-multi rule in place. Adding a rule of our own
		 * won't do any good in that case, so we instead modify the
		 * rules for ALL MULTI below when the BMC already has the
		 * rule in place.
		 */
		if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			mac_addr->value.addr8[0] ^= 1;
			mac_addr->mask.addr8[0] ^= 1;
			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
		} else {
			mac_addr->state = FBNIC_TCAM_S_DELETE;
		}
	}

	/* Add rules for BMC all multicast if it is enabled */
	fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);

	/* Sift out any unshared BMC rules and place them in BMC only section */
	fbnic_sift_macda(fbd);

	/* Write updates to hardware */
	fbnic_write_rules(fbd);
	fbnic_write_macda(fbd);
	fbnic_write_tce_tcam(fbd);
}

static void fbnic_set_rx_mode(struct net_device *netdev)
{
	/* No need to update the hardware if we are not running */
	if (netif_running(netdev))
		__fbnic_set_rx_mode(netdev);
}

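/* ndo_set_mac_address callback: validate and record the new address,
 * then resync the Rx filters so the host TCAM entry follows dev_addr.
 */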
static int fbnic_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	fbnic_set_rx_mode(netdev);

	return 0;
}

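/* Drop all host-owned address filters, deleting any TCAM entry left
 * with no users, and flush the unicast/multicast sync lists.
 */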
void fbnic_clear_rx_mode(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int idx;

	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];

		if (mac_addr->state != FBNIC_TCAM_S_VALID)
			continue;

		bitmap_clear(mac_addr->act_tcam,
			     FBNIC_MAC_ADDR_T_HOST_START,
			     FBNIC_MAC_ADDR_T_HOST_LEN);

		if (bitmap_empty(mac_addr->act_tcam,
				 FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
			mac_addr->state = FBNIC_TCAM_S_DELETE;
	}

	/* Write updates to hardware */
	fbnic_write_macda(fbd);

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

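/* ndo_hwtstamp_get callback: report the saved timestamping config */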
static int fbnic_hwtstamp_get(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	*config = fbn->hwtstamp_config;

	return 0;
}

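/* ndo_hwtstamp_set callback: widen the requested Rx filter to the
 * nearest class the hardware can match (e.g. a PTP v2 L4 SYNC-only
 * request is upscaled to all PTP v2 L4 events), then reprogram the
 * RSS rules if the filter actually changed while running.
 */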
static int fbnic_hwtstamp_set(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int old_rx_filter;

	if (config->source != HWTSTAMP_SOURCE_NETDEV)
		return -EOPNOTSUPP;

	if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
		return 0;

	/* Upscale the filters */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		break;
	case HWTSTAMP_FILTER_NTP_ALL:
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* Configure */
	old_rx_filter = fbn->hwtstamp_config.rx_filter;
	memcpy(&fbn->hwtstamp_config, config, sizeof(*config));

	if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	/* Save / report back filter configuration
	 * Note that our filter configuration is inexact. Instead of
	 * filtering for a specific UDP port or L2 Ethertype we are
	 * filtering in all UDP or all non-IP packets for timestamping. So
	 * if anything other than FILTER_ALL is requested we report
	 * FILTER_SOME indicating that we will be timestamping a few
	 * additional packets.
	 */
	if (config->rx_filter > HWTSTAMP_FILTER_ALL)
		config->rx_filter = HWTSTAMP_FILTER_SOME;

	return 0;
}

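/* ndo_get_stats64 callback: merge the software ring counters with the
 * hardware drop/error counters, which are sampled under hw_stats_lock.
 */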
static void fbnic_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats64)
{
	u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
	u64 tx_bytes, tx_packets, tx_dropped = 0;
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_queue_stats *stats;
	u64 rx_over = 0, rx_missed = 0;
	unsigned int start, i;

	fbnic_get_hw_stats(fbd);

	stats = &fbn->tx_stats;

	tx_bytes = stats->bytes;
	tx_packets = stats->packets;
	tx_dropped = stats->dropped;

	/* Record drops from Tx HW Datapath */
	spin_lock(&fbd->hw_stats_lock);
	tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
		      fbd->hw_stats.tti.cm_drop.frames.value +
		      fbd->hw_stats.tti.frame_drop.frames.value +
		      fbd->hw_stats.tti.tbi_drop.frames.value;
	spin_unlock(&fbd->hw_stats_lock);

	stats64->tx_bytes = tx_bytes;
	stats64->tx_packets = tx_packets;
	stats64->tx_dropped = tx_dropped;

	for (i = 0; i < fbn->num_tx_queues; i++) {
		struct fbnic_ring *txr = fbn->tx[i];

		if (!txr)
			continue;

		stats = &txr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_bytes = stats->bytes;
			tx_packets = stats->packets;
			tx_dropped = stats->dropped;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		stats64->tx_bytes += tx_bytes;
		stats64->tx_packets += tx_packets;
		stats64->tx_dropped += tx_dropped;
	}

	stats = &fbn->rx_stats;

	rx_bytes = stats->bytes;
	rx_packets = stats->packets;
	rx_dropped = stats->dropped;

	spin_lock(&fbd->hw_stats_lock);
	/* Record drops for the host FIFOs.
	 * 4: network to Host, 6: BMC to Host
	 * Exclude the BMC and MC FIFOs as those stats may contain drops
	 * due to unrelated items such as TCAM misses. They are still
	 * accessible through the ethtool stats.
	 */
	i = FBNIC_RXB_FIFO_HOST;
	rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
	i = FBNIC_RXB_FIFO_BMC_TO_HOST;
	rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;

	for (i = 0; i < fbd->max_num_queues; i++) {
		/* Report packets dropped due to CQ/BDQ being full/empty */
		rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
		rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;

		/* Report packets with errors */
		rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
	}
	spin_unlock(&fbd->hw_stats_lock);

	stats64->rx_bytes = rx_bytes;
	stats64->rx_packets = rx_packets;
	stats64->rx_dropped = rx_dropped;
	stats64->rx_over_errors = rx_over;
	stats64->rx_errors = rx_errors;
	stats64->rx_missed_errors = rx_missed;

	for (i = 0; i < fbn->num_rx_queues; i++) {
		struct fbnic_ring *rxr = fbn->rx[i];

		if (!rxr)
			continue;

		stats = &rxr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			rx_bytes = stats->bytes;
			rx_packets = stats->packets;
			rx_dropped = stats->dropped;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		stats64->rx_bytes += rx_bytes;
		stats64->rx_packets += rx_packets;
		stats64->rx_dropped += rx_dropped;
	}
}

static const struct net_device_ops fbnic_netdev_ops = {
	.ndo_open		= fbnic_open,
	.ndo_stop		= fbnic_stop,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fbnic_xmit_frame,
	.ndo_features_check	= fbnic_features_check,
	.ndo_set_mac_address	= fbnic_set_mac,
	.ndo_set_rx_mode	= fbnic_set_rx_mode,
	.ndo_get_stats64	= fbnic_get_stats64,
	.ndo_hwtstamp_get	= fbnic_hwtstamp_get,
	.ndo_hwtstamp_set	= fbnic_hwtstamp_set,
};

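/* Per-queue Rx stats for the netdev genl stats API: software counters
 * come from the ring under the u64_stats syncp, hardware drop counters
 * from the per-queue HW stats under hw_stats_lock.
 */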
static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_rx *rx)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *rxr = fbn->rx[idx];
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_queue_stats *stats;
	u64 bytes, packets, alloc_fail;
	u64 csum_complete, csum_none;
	unsigned int start;

	if (!rxr)
		return;

	stats = &rxr->stats;
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
		packets = stats->packets;
		alloc_fail = stats->rx.alloc_failed;
		csum_complete = stats->rx.csum_complete;
		csum_none = stats->rx.csum_none;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	rx->bytes = bytes;
	rx->packets = packets;
	rx->alloc_fail = alloc_fail;
	rx->csum_complete = csum_complete;
	rx->csum_none = csum_none;

	fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);

	spin_lock(&fbd->hw_stats_lock);
	rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
			       fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
	rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
		       rx->hw_drop_overruns;
	spin_unlock(&fbd->hw_stats_lock);
}

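/* Per-queue Tx stats for the netdev genl stats API, read from the ring
 * counters under the u64_stats syncp.
 */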
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_tx *tx)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *txr = fbn->tx[idx];
	struct fbnic_queue_stats *stats;
	u64 stop, wake, csum, lso;
	unsigned int start;
	u64 bytes, packets;

	if (!txr)
		return;

	stats = &txr->stats;
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
		packets = stats->packets;
		csum = stats->twq.csum_partial;
		lso = stats->twq.lso;
		stop = stats->twq.stop;
		wake = stats->twq.wake;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	tx->bytes = bytes;
	tx->packets = packets;
	tx->needs_csum = csum + lso;
	tx->hw_gso_wire_packets = lso;
	tx->stop = stop;
	tx->wake = wake;
}

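/* get_base_stats callback: report the device-level counters kept in
 * fbn->tx_stats and fbn->rx_stats; together with the per-queue
 * callbacks above these are expected to cover the device totals.
 */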
static void fbnic_get_base_stats(struct net_device *dev,
				 struct netdev_queue_stats_rx *rx,
				 struct netdev_queue_stats_tx *tx)
{
	struct fbnic_net *fbn = netdev_priv(dev);

	tx->bytes = fbn->tx_stats.bytes;
	tx->packets = fbn->tx_stats.packets;
	tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
	tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
	tx->stop = fbn->tx_stats.twq.stop;
	tx->wake = fbn->tx_stats.twq.wake;

	rx->bytes = fbn->rx_stats.bytes;
	rx->packets = fbn->rx_stats.packets;
	rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
	rx->csum_complete = fbn->rx_stats.rx.csum_complete;
	rx->csum_none = fbn->rx_stats.rx.csum_none;
}

static const struct netdev_stat_ops fbnic_stat_ops = {
	.get_queue_stats_rx	= fbnic_get_queue_stats_rx,
	.get_queue_stats_tx	= fbnic_get_queue_stats_tx,
	.get_base_stats		= fbnic_get_base_stats,
};

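/* Clamp the requested Tx/Rx queue counts to the number of available
 * NAPI vectors (IRQs minus the non-NAPI vectors) and size the NAPI
 * set to cover the larger of the two.
 */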
void fbnic_reset_queues(struct fbnic_net *fbn,
			unsigned int tx, unsigned int rx)
{
	struct fbnic_dev *fbd = fbn->fbd;
	unsigned int max_napis;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;

	tx = min(tx, max_napis);
	fbn->num_tx_queues = tx;

	rx = min(rx, max_napis);
	fbn->num_rx_queues = rx;

	fbn->num_napi = max(tx, rx);
}

/**
 * fbnic_netdev_free - Free the netdev associated with fbnic
 * @fbd: Driver specific structure to free netdev from
 *
 * Destroy the phylink instance bound to the netdev, free the netdev,
 * and clear the reference held in the hardware structure.
 **/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);

	if (fbn->phylink)
		phylink_destroy(fbn->phylink);

	free_netdev(fbd->netdev);
	fbd->netdev = NULL;
}

/**
 * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
 * @fbd: Driver specific structure to associate netdev with
 *
 * Allocate and initialize the netdev and netdev private structure. Bind
 * together the hardware, netdev, and pci data structures.
 *
 * Return: Pointer to net_device on success, NULL on failure
 **/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
	struct net_device *netdev;
	struct fbnic_net *fbn;
	int default_queues;

	netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
	if (!netdev)
		return NULL;

	SET_NETDEV_DEV(netdev, fbd->dev);
	fbd->netdev = netdev;

	netdev->netdev_ops = &fbnic_netdev_ops;
	netdev->stat_ops = &fbnic_stat_ops;

	fbnic_set_ethtool_ops(netdev);

	fbn = netdev_priv(netdev);

	fbn->netdev = netdev;
	fbn->fbd = fbd;

	fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
	fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
	fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
	fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;

	fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
	fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
	fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;

	default_queues = netif_get_num_default_rss_queues();
	if (default_queues > fbd->max_num_queues)
		default_queues = fbd->max_num_queues;

	fbnic_reset_queues(fbn, default_queues, default_queues);

	fbnic_reset_indir_tbl(fbn);
	fbnic_rss_key_fill(fbn->rss_key);
	fbnic_rss_init_en_mask(fbn);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->gso_partial_features =
		NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM |
		NETIF_F_GSO_IPXIP4 |
		NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->features |=
		netdev->gso_partial_features |
		FBNIC_TUN_GSO_FEATURES |
		NETIF_F_RXHASH |
		NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM |
		NETIF_F_TSO |
		NETIF_F_TSO_ECN |
		NETIF_F_TSO6 |
		NETIF_F_GSO_PARTIAL |
		NETIF_F_GSO_UDP_L4;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
	netdev->hw_enc_features |= netdev->features;
	netdev->features |= NETIF_F_NTUPLE;

	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

	/* TBD: This is a workaround for BMC as phylink doesn't have support
	 * for leaving the link enabled if a BMC is present.
	 */
	netdev->ethtool->wol_enabled = true;

	netif_carrier_off(netdev);

	netif_tx_stop_all_queues(netdev);

	if (fbnic_phylink_init(netdev)) {
		fbnic_netdev_free(fbd);
		return NULL;
	}

	return netdev;
}

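/* Derive the MAC address from the PCIe Device Serial Number (an
 * EUI-64), dropping the two filler bytes in the middle: for example,
 * a DSN of 0x00AABBFFFFCCDDEE yields the MAC 00:aa:bb:cc:dd:ee.
 */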
static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
{
	addr[0] = (dsn >> 56) & 0xFF;
	addr[1] = (dsn >> 48) & 0xFF;
	addr[2] = (dsn >> 40) & 0xFF;
	addr[3] = (dsn >> 16) & 0xFF;
	addr[4] = (dsn >> 8) & 0xFF;
	addr[5] = dsn & 0xFF;

	return is_valid_ether_addr(addr) ? 0 : -EINVAL;
}

/**
 * fbnic_netdev_register - Initialize general software structures
 * @netdev: Netdev containing structure to initialize and register
 *
 * Initialize the MAC address for the netdev and register it.
 *
 * Return: 0 on success, negative on failure
 **/
int fbnic_netdev_register(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	u64 dsn = fbd->dsn;
	u8 addr[ETH_ALEN];
	int err;

	err = fbnic_dsn_to_mac_addr(dsn, addr);
	if (!err) {
		ether_addr_copy(netdev->perm_addr, addr);
		eth_hw_addr_set(netdev, addr);
	} else {
		/* A randomly assigned MAC address would cause provisioning
		 * issues, so instead just fail to spawn the netdev and
		 * avoid any confusion.
		 */
		dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
		return err;
	}

	return register_netdev(netdev);
}

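/* Unregister the netdev; the memory itself is released later by
 * fbnic_netdev_free().
 */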
void fbnic_netdev_unregister(struct net_device *netdev)
{
	unregister_netdev(netdev);
}