1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
4 #include <linux/etherdevice.h>
5 #include <linux/ipv6.h>
6 #include <linux/types.h>
7 #include <net/netdev_queues.h>
8
9 #include "fbnic.h"
10 #include "fbnic_netdev.h"
11 #include "fbnic_txrx.h"
12
__fbnic_open(struct fbnic_net * fbn)13 int __fbnic_open(struct fbnic_net *fbn)
14 {
15 struct fbnic_dev *fbd = fbn->fbd;
16 int err;
17
18 err = fbnic_alloc_napi_vectors(fbn);
19 if (err)
20 return err;
21
22 err = fbnic_alloc_resources(fbn);
23 if (err)
24 goto free_napi_vectors;
25
26 err = fbnic_set_netif_queues(fbn);
27 if (err)
28 goto free_resources;
29
30 /* Send ownership message and flush to verify FW has seen it */
31 err = fbnic_fw_xmit_ownership_msg(fbd, true);
32 if (err) {
33 dev_warn(fbd->dev,
34 "Error %d sending host ownership message to the firmware\n",
35 err);
36 goto free_resources;
37 }
38
39 err = fbnic_time_start(fbn);
40 if (err)
41 goto release_ownership;
42
43 err = fbnic_fw_init_heartbeat(fbd, false);
44 if (err)
45 goto time_stop;
46
47 err = fbnic_pcs_request_irq(fbd);
48 if (err)
49 goto time_stop;
50
51 /* Pull the BMC config and initialize the RPC */
52 fbnic_bmc_rpc_init(fbd);
53 fbnic_rss_reinit(fbd, fbn);
54
55 return 0;
56 time_stop:
57 fbnic_time_stop(fbn);
58 release_ownership:
59 fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
60 free_resources:
61 fbnic_free_resources(fbn);
62 free_napi_vectors:
63 fbnic_free_napi_vectors(fbn);
64 return err;
65 }
66
fbnic_open(struct net_device * netdev)67 static int fbnic_open(struct net_device *netdev)
68 {
69 struct fbnic_net *fbn = netdev_priv(netdev);
70 int err;
71
72 fbnic_napi_name_irqs(fbn->fbd);
73
74 err = __fbnic_open(fbn);
75 if (!err)
76 fbnic_up(fbn);
77
78 return err;
79 }
80
fbnic_stop(struct net_device * netdev)81 static int fbnic_stop(struct net_device *netdev)
82 {
83 struct fbnic_net *fbn = netdev_priv(netdev);
84
85 fbnic_down(fbn);
86 fbnic_pcs_free_irq(fbn->fbd);
87
88 fbnic_time_stop(fbn);
89 fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
90
91 fbnic_reset_netif_queues(fbn);
92 fbnic_free_resources(fbn);
93 fbnic_free_napi_vectors(fbn);
94
95 return 0;
96 }
97
fbnic_uc_sync(struct net_device * netdev,const unsigned char * addr)98 static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
99 {
100 struct fbnic_net *fbn = netdev_priv(netdev);
101 struct fbnic_mac_addr *avail_addr;
102
103 if (WARN_ON(!is_valid_ether_addr(addr)))
104 return -EADDRNOTAVAIL;
105
106 avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
107 if (!avail_addr)
108 return -ENOSPC;
109
110 /* Add type flag indicating this address is in use by the host */
111 set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);
112
113 return 0;
114 }
115
fbnic_uc_unsync(struct net_device * netdev,const unsigned char * addr)116 static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
117 {
118 struct fbnic_net *fbn = netdev_priv(netdev);
119 struct fbnic_dev *fbd = fbn->fbd;
120 int i, ret;
121
122 /* Scan from middle of list to bottom, filling bottom up.
123 * Skip the first entry which is reserved for dev_addr and
124 * leave the last entry to use for promiscuous filtering.
125 */
126 for (i = fbd->mac_addr_boundary, ret = -ENOENT;
127 i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
128 struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
129
130 if (!ether_addr_equal(mac_addr->value.addr8, addr))
131 continue;
132
133 ret = __fbnic_uc_unsync(mac_addr);
134 }
135
136 return ret;
137 }
138
fbnic_mc_sync(struct net_device * netdev,const unsigned char * addr)139 static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
140 {
141 struct fbnic_net *fbn = netdev_priv(netdev);
142 struct fbnic_mac_addr *avail_addr;
143
144 if (WARN_ON(!is_multicast_ether_addr(addr)))
145 return -EADDRNOTAVAIL;
146
147 avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
148 if (!avail_addr)
149 return -ENOSPC;
150
151 /* Add type flag indicating this address is in use by the host */
152 set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);
153
154 return 0;
155 }
156
fbnic_mc_unsync(struct net_device * netdev,const unsigned char * addr)157 static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
158 {
159 struct fbnic_net *fbn = netdev_priv(netdev);
160 struct fbnic_dev *fbd = fbn->fbd;
161 int i, ret;
162
163 /* Scan from middle of list to top, filling top down.
164 * Skip over the address reserved for the BMC MAC and
165 * exclude index 0 as that belongs to the broadcast address
166 */
167 for (i = fbd->mac_addr_boundary, ret = -ENOENT;
168 --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
169 struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
170
171 if (!ether_addr_equal(mac_addr->value.addr8, addr))
172 continue;
173
174 ret = __fbnic_mc_unsync(mac_addr);
175 }
176
177 return ret;
178 }
179
/* Rebuild the MACDA TCAM state from the netdev's address lists and flags,
 * then push the result to hardware: host dev_addr, broadcast, synced
 * unicast/multicast lists, and a promiscuous / all-multicast catch-all
 * entry, reconciled against rules that may be shared with the BMC.
 */
void __fbnic_set_rx_mode(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	bool uc_promisc = false, mc_promisc = false;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_mac_addr *mac_addr;
	int err;

	/* Populate host address from dev_addr */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
	if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
	    mac_addr->state != FBNIC_TCAM_S_VALID) {
		ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
		mac_addr->state = FBNIC_TCAM_S_UPDATE;
		set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
	}

	/* Populate broadcast address if broadcast is enabled */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
	if (netdev->flags & IFF_BROADCAST) {
		if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_broadcast_addr(mac_addr->value.addr8);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
		set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
	}

	/* Synchronize unicast and multicast address lists.
	 * -ENOSPC from the sync handlers means the TCAM is full; fall
	 * back to promiscuous handling for that class of addresses.
	 */
	err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
	if (err == -ENOSPC)
		uc_promisc = true;
	err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
	if (err == -ENOSPC)
		mc_promisc = true;

	/* Interface flags can also force promiscuous modes on;
	 * unicast promiscuous implies multicast promiscuous.
	 */
	uc_promisc |= !!(netdev->flags & IFF_PROMISC);
	mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;

	/* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
	if (uc_promisc) {
		if (!is_zero_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			set_bit(FBNIC_MAC_ADDR_T_PROMISC,
				mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mc_promisc &&
		   (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
		/* We have to add a special handler for multicast as the
		 * BMC may have an all-multi rule already in place. As such
		 * adding a rule ourselves won't do any good so we will have
		 * to modify the rules for the ALL MULTI below if the BMC
		 * already has the rule in place.
		 */
		if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			/* NOTE(review): the XOR on byte 0 of value/mask
			 * appears to build an entry keyed on the multicast
			 * group bit only — confirm TCAM mask polarity.
			 */
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			mac_addr->value.addr8[0] ^= 1;
			mac_addr->mask.addr8[0] ^= 1;
			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		/* Promiscuous no longer needed: keep the entry if the BMC
		 * still shares it, otherwise schedule it for deletion.
		 */
		if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
		} else {
			mac_addr->state = FBNIC_TCAM_S_DELETE;
		}
	}

	/* Add rules for BMC all multicast if it is enabled */
	fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);

	/* Sift out any unshared BMC rules and place them in BMC only section */
	fbnic_sift_macda(fbd);

	/* Write updates to hardware */
	fbnic_write_rules(fbd);
	fbnic_write_macda(fbd);
	fbnic_write_tce_tcam(fbd);
}
276
/* ndo_set_rx_mode handler. */
static void fbnic_set_rx_mode(struct net_device *netdev)
{
	/* No need to update the hardware if we are not running */
	if (!netif_running(netdev))
		return;

	__fbnic_set_rx_mode(netdev);
}
283
fbnic_set_mac(struct net_device * netdev,void * p)284 static int fbnic_set_mac(struct net_device *netdev, void *p)
285 {
286 struct sockaddr *addr = p;
287
288 if (!is_valid_ether_addr(addr->sa_data))
289 return -EADDRNOTAVAIL;
290
291 eth_hw_addr_set(netdev, addr->sa_data);
292
293 fbnic_set_rx_mode(netdev);
294
295 return 0;
296 }
297
fbnic_clear_rx_mode(struct net_device * netdev)298 void fbnic_clear_rx_mode(struct net_device *netdev)
299 {
300 struct fbnic_net *fbn = netdev_priv(netdev);
301 struct fbnic_dev *fbd = fbn->fbd;
302 int idx;
303
304 for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
305 struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
306
307 if (mac_addr->state != FBNIC_TCAM_S_VALID)
308 continue;
309
310 bitmap_clear(mac_addr->act_tcam,
311 FBNIC_MAC_ADDR_T_HOST_START,
312 FBNIC_MAC_ADDR_T_HOST_LEN);
313
314 if (bitmap_empty(mac_addr->act_tcam,
315 FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
316 mac_addr->state = FBNIC_TCAM_S_DELETE;
317 }
318
319 /* Write updates to hardware */
320 fbnic_write_macda(fbd);
321
322 __dev_uc_unsync(netdev, NULL);
323 __dev_mc_unsync(netdev, NULL);
324 }
325
fbnic_hwtstamp_get(struct net_device * netdev,struct kernel_hwtstamp_config * config)326 static int fbnic_hwtstamp_get(struct net_device *netdev,
327 struct kernel_hwtstamp_config *config)
328 {
329 struct fbnic_net *fbn = netdev_priv(netdev);
330
331 *config = fbn->hwtstamp_config;
332
333 return 0;
334 }
335
/* ndo_hwtstamp_set handler: validate and apply a new hardware timestamp
 * configuration. Requested Rx filters are upscaled to the closest class
 * the device can honor, and anything narrower than FILTER_ALL is
 * reported back as FILTER_SOME (see comment near the bottom).
 */
static int fbnic_hwtstamp_set(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int old_rx_filter;

	/* Only netdev-sourced timestamping is supported */
	if (config->source != HWTSTAMP_SOURCE_NETDEV)
		return -EOPNOTSUPP;

	/* Nothing to do if the requested config matches the current one */
	if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
		return 0;

	/* Upscale the filters */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		break;
	case HWTSTAMP_FILTER_NTP_ALL:
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* Configure */
	old_rx_filter = fbn->hwtstamp_config.rx_filter;
	memcpy(&fbn->hwtstamp_config, config, sizeof(*config));

	/* Reprogram RSS/rules only if the filter class changed while up */
	if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	/* Save / report back filter configuration
	 * Note that our filter configuration is inexact. Instead of
	 * filtering for a specific UDP port or L2 Ethertype we are
	 * filtering in all UDP or all non-IP packets for timestamping. So
	 * if anything other than FILTER_ALL is requested we report
	 * FILTER_SOME indicating that we will be timestamping a few
	 * additional packets.
	 */
	if (config->rx_filter > HWTSTAMP_FILTER_ALL)
		config->rx_filter = HWTSTAMP_FILTER_SOME;

	return 0;
}
403
fbnic_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats64)404 static void fbnic_get_stats64(struct net_device *dev,
405 struct rtnl_link_stats64 *stats64)
406 {
407 u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
408 u64 tx_bytes, tx_packets, tx_dropped = 0;
409 struct fbnic_net *fbn = netdev_priv(dev);
410 struct fbnic_dev *fbd = fbn->fbd;
411 struct fbnic_queue_stats *stats;
412 u64 rx_over = 0, rx_missed = 0;
413 unsigned int start, i;
414
415 fbnic_get_hw_stats(fbd);
416
417 stats = &fbn->tx_stats;
418
419 tx_bytes = stats->bytes;
420 tx_packets = stats->packets;
421 tx_dropped = stats->dropped;
422
423 stats64->tx_bytes = tx_bytes;
424 stats64->tx_packets = tx_packets;
425 stats64->tx_dropped = tx_dropped;
426
427 /* Record drops from Tx HW Datapath */
428 tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
429 fbd->hw_stats.tti.cm_drop.frames.value +
430 fbd->hw_stats.tti.frame_drop.frames.value +
431 fbd->hw_stats.tti.tbi_drop.frames.value;
432
433 for (i = 0; i < fbn->num_tx_queues; i++) {
434 struct fbnic_ring *txr = fbn->tx[i];
435
436 if (!txr)
437 continue;
438
439 stats = &txr->stats;
440 do {
441 start = u64_stats_fetch_begin(&stats->syncp);
442 tx_bytes = stats->bytes;
443 tx_packets = stats->packets;
444 tx_dropped = stats->dropped;
445 } while (u64_stats_fetch_retry(&stats->syncp, start));
446
447 stats64->tx_bytes += tx_bytes;
448 stats64->tx_packets += tx_packets;
449 stats64->tx_dropped += tx_dropped;
450 }
451
452 stats = &fbn->rx_stats;
453
454 rx_bytes = stats->bytes;
455 rx_packets = stats->packets;
456 rx_dropped = stats->dropped;
457
458 spin_lock(&fbd->hw_stats_lock);
459 /* Record drops for the host FIFOs.
460 * 4: network to Host, 6: BMC to Host
461 * Exclude the BMC and MC FIFOs as those stats may contain drops
462 * due to unrelated items such as TCAM misses. They are still
463 * accessible through the ethtool stats.
464 */
465 i = FBNIC_RXB_FIFO_HOST;
466 rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
467 i = FBNIC_RXB_FIFO_BMC_TO_HOST;
468 rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
469
470 for (i = 0; i < fbd->max_num_queues; i++) {
471 /* Report packets dropped due to CQ/BDQ being full/empty */
472 rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
473 rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
474
475 /* Report packets with errors */
476 rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
477 }
478 spin_unlock(&fbd->hw_stats_lock);
479
480 stats64->rx_bytes = rx_bytes;
481 stats64->rx_packets = rx_packets;
482 stats64->rx_dropped = rx_dropped;
483 stats64->rx_over_errors = rx_over;
484 stats64->rx_errors = rx_errors;
485 stats64->rx_missed_errors = rx_missed;
486
487 for (i = 0; i < fbn->num_rx_queues; i++) {
488 struct fbnic_ring *rxr = fbn->rx[i];
489
490 if (!rxr)
491 continue;
492
493 stats = &rxr->stats;
494 do {
495 start = u64_stats_fetch_begin(&stats->syncp);
496 rx_bytes = stats->bytes;
497 rx_packets = stats->packets;
498 rx_dropped = stats->dropped;
499 } while (u64_stats_fetch_retry(&stats->syncp, start));
500
501 stats64->rx_bytes += rx_bytes;
502 stats64->rx_packets += rx_packets;
503 stats64->rx_dropped += rx_dropped;
504 }
505 }
506
/* Standard netdev entry points for the fbnic interface */
static const struct net_device_ops fbnic_netdev_ops = {
	.ndo_open		= fbnic_open,
	.ndo_stop		= fbnic_stop,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fbnic_xmit_frame,
	.ndo_features_check	= fbnic_features_check,
	.ndo_set_mac_address	= fbnic_set_mac,
	.ndo_set_rx_mode	= fbnic_set_rx_mode,
	.ndo_get_stats64	= fbnic_get_stats64,
	.ndo_hwtstamp_get	= fbnic_hwtstamp_get,
	.ndo_hwtstamp_set	= fbnic_hwtstamp_set,
};
519
fbnic_get_queue_stats_rx(struct net_device * dev,int idx,struct netdev_queue_stats_rx * rx)520 static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
521 struct netdev_queue_stats_rx *rx)
522 {
523 struct fbnic_net *fbn = netdev_priv(dev);
524 struct fbnic_ring *rxr = fbn->rx[idx];
525 struct fbnic_dev *fbd = fbn->fbd;
526 struct fbnic_queue_stats *stats;
527 u64 bytes, packets, alloc_fail;
528 u64 csum_complete, csum_none;
529 unsigned int start;
530
531 if (!rxr)
532 return;
533
534 stats = &rxr->stats;
535 do {
536 start = u64_stats_fetch_begin(&stats->syncp);
537 bytes = stats->bytes;
538 packets = stats->packets;
539 alloc_fail = stats->rx.alloc_failed;
540 csum_complete = stats->rx.csum_complete;
541 csum_none = stats->rx.csum_none;
542 } while (u64_stats_fetch_retry(&stats->syncp, start));
543
544 rx->bytes = bytes;
545 rx->packets = packets;
546 rx->alloc_fail = alloc_fail;
547 rx->csum_complete = csum_complete;
548 rx->csum_none = csum_none;
549
550 fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
551
552 spin_lock(&fbd->hw_stats_lock);
553 rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
554 fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
555 rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
556 rx->hw_drop_overruns;
557 spin_unlock(&fbd->hw_stats_lock);
558 }
559
fbnic_get_queue_stats_tx(struct net_device * dev,int idx,struct netdev_queue_stats_tx * tx)560 static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
561 struct netdev_queue_stats_tx *tx)
562 {
563 struct fbnic_net *fbn = netdev_priv(dev);
564 struct fbnic_ring *txr = fbn->tx[idx];
565 struct fbnic_queue_stats *stats;
566 u64 stop, wake, csum, lso;
567 unsigned int start;
568 u64 bytes, packets;
569
570 if (!txr)
571 return;
572
573 stats = &txr->stats;
574 do {
575 start = u64_stats_fetch_begin(&stats->syncp);
576 bytes = stats->bytes;
577 packets = stats->packets;
578 csum = stats->twq.csum_partial;
579 lso = stats->twq.lso;
580 stop = stats->twq.stop;
581 wake = stats->twq.wake;
582 } while (u64_stats_fetch_retry(&stats->syncp, start));
583
584 tx->bytes = bytes;
585 tx->packets = packets;
586 tx->needs_csum = csum + lso;
587 tx->hw_gso_wire_packets = lso;
588 tx->stop = stop;
589 tx->wake = wake;
590 }
591
fbnic_get_base_stats(struct net_device * dev,struct netdev_queue_stats_rx * rx,struct netdev_queue_stats_tx * tx)592 static void fbnic_get_base_stats(struct net_device *dev,
593 struct netdev_queue_stats_rx *rx,
594 struct netdev_queue_stats_tx *tx)
595 {
596 struct fbnic_net *fbn = netdev_priv(dev);
597
598 tx->bytes = fbn->tx_stats.bytes;
599 tx->packets = fbn->tx_stats.packets;
600 tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
601 tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
602 tx->stop = fbn->tx_stats.twq.stop;
603 tx->wake = fbn->tx_stats.twq.wake;
604
605 rx->bytes = fbn->rx_stats.bytes;
606 rx->packets = fbn->rx_stats.packets;
607 rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
608 rx->csum_complete = fbn->rx_stats.rx.csum_complete;
609 rx->csum_none = fbn->rx_stats.rx.csum_none;
610 }
611
/* Hooks for the netdev per-queue statistics API */
static const struct netdev_stat_ops fbnic_stat_ops = {
	.get_queue_stats_rx	= fbnic_get_queue_stats_rx,
	.get_queue_stats_tx	= fbnic_get_queue_stats_tx,
	.get_base_stats		= fbnic_get_base_stats,
};
617
fbnic_reset_queues(struct fbnic_net * fbn,unsigned int tx,unsigned int rx)618 void fbnic_reset_queues(struct fbnic_net *fbn,
619 unsigned int tx, unsigned int rx)
620 {
621 struct fbnic_dev *fbd = fbn->fbd;
622 unsigned int max_napis;
623
624 max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
625
626 tx = min(tx, max_napis);
627 fbn->num_tx_queues = tx;
628
629 rx = min(rx, max_napis);
630 fbn->num_rx_queues = rx;
631
632 fbn->num_napi = max(tx, rx);
633 }
634
635 /**
636 * fbnic_netdev_free - Free the netdev associate with fbnic
637 * @fbd: Driver specific structure to free netdev from
638 *
639 * Allocate and initialize the netdev and netdev private structure. Bind
640 * together the hardware, netdev, and pci data structures.
641 **/
fbnic_netdev_free(struct fbnic_dev * fbd)642 void fbnic_netdev_free(struct fbnic_dev *fbd)
643 {
644 struct fbnic_net *fbn = netdev_priv(fbd->netdev);
645
646 if (fbn->phylink)
647 phylink_destroy(fbn->phylink);
648
649 free_netdev(fbd->netdev);
650 fbd->netdev = NULL;
651 }
652
653 /**
654 * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
655 * @fbd: Driver specific structure to associate netdev with
656 *
657 * Allocate and initialize the netdev and netdev private structure. Bind
658 * together the hardware, netdev, and pci data structures.
659 *
660 * Return: Pointer to net_device on success, NULL on failure
661 **/
fbnic_netdev_alloc(struct fbnic_dev * fbd)662 struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
663 {
664 struct net_device *netdev;
665 struct fbnic_net *fbn;
666 int default_queues;
667
668 netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
669 if (!netdev)
670 return NULL;
671
672 SET_NETDEV_DEV(netdev, fbd->dev);
673 fbd->netdev = netdev;
674
675 netdev->netdev_ops = &fbnic_netdev_ops;
676 netdev->stat_ops = &fbnic_stat_ops;
677
678 fbnic_set_ethtool_ops(netdev);
679
680 fbn = netdev_priv(netdev);
681
682 fbn->netdev = netdev;
683 fbn->fbd = fbd;
684
685 fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
686 fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
687 fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
688 fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
689
690 fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
691 fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
692 fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
693
694 default_queues = netif_get_num_default_rss_queues();
695 if (default_queues > fbd->max_num_queues)
696 default_queues = fbd->max_num_queues;
697
698 fbnic_reset_queues(fbn, default_queues, default_queues);
699
700 fbnic_reset_indir_tbl(fbn);
701 fbnic_rss_key_fill(fbn->rss_key);
702 fbnic_rss_init_en_mask(fbn);
703
704 netdev->priv_flags |= IFF_UNICAST_FLT;
705
706 netdev->gso_partial_features =
707 NETIF_F_GSO_GRE |
708 NETIF_F_GSO_GRE_CSUM |
709 NETIF_F_GSO_IPXIP4 |
710 NETIF_F_GSO_UDP_TUNNEL |
711 NETIF_F_GSO_UDP_TUNNEL_CSUM;
712
713 netdev->features |=
714 netdev->gso_partial_features |
715 FBNIC_TUN_GSO_FEATURES |
716 NETIF_F_RXHASH |
717 NETIF_F_SG |
718 NETIF_F_HW_CSUM |
719 NETIF_F_RXCSUM |
720 NETIF_F_TSO |
721 NETIF_F_TSO_ECN |
722 NETIF_F_TSO6 |
723 NETIF_F_GSO_PARTIAL |
724 NETIF_F_GSO_UDP_L4;
725
726 netdev->hw_features |= netdev->features;
727 netdev->vlan_features |= netdev->features;
728 netdev->hw_enc_features |= netdev->features;
729 netdev->features |= NETIF_F_NTUPLE;
730
731 netdev->min_mtu = IPV6_MIN_MTU;
732 netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
733
734 /* TBD: This is workaround for BMC as phylink doesn't have support
735 * for leavling the link enabled if a BMC is present.
736 */
737 netdev->ethtool->wol_enabled = true;
738
739 fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
740 fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
741 netif_carrier_off(netdev);
742
743 netif_tx_stop_all_queues(netdev);
744
745 if (fbnic_phylink_init(netdev)) {
746 fbnic_netdev_free(fbd);
747 return NULL;
748 }
749
750 return netdev;
751 }
752
/* Derive a MAC address from the PCIe device serial number (DSN).
 * The top three bytes and the bottom three bytes of the DSN are used;
 * the two middle bytes (bits 39:24) are skipped.
 */
static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
{
	static const u8 shift[ETH_ALEN] = { 56, 48, 40, 16, 8, 0 };
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = (dsn >> shift[i]) & 0xFF;

	return is_valid_ether_addr(addr) ? 0 : -EINVAL;
}
764
765 /**
766 * fbnic_netdev_register - Initialize general software structures
767 * @netdev: Netdev containing structure to initialize and register
768 *
769 * Initialize the MAC address for the netdev and register it.
770 *
771 * Return: 0 on success, negative on failure
772 **/
fbnic_netdev_register(struct net_device * netdev)773 int fbnic_netdev_register(struct net_device *netdev)
774 {
775 struct fbnic_net *fbn = netdev_priv(netdev);
776 struct fbnic_dev *fbd = fbn->fbd;
777 u64 dsn = fbd->dsn;
778 u8 addr[ETH_ALEN];
779 int err;
780
781 err = fbnic_dsn_to_mac_addr(dsn, addr);
782 if (!err) {
783 ether_addr_copy(netdev->perm_addr, addr);
784 eth_hw_addr_set(netdev, addr);
785 } else {
786 /* A randomly assigned MAC address will cause provisioning
787 * issues so instead just fail to spawn the netdev and
788 * avoid any confusion.
789 */
790 dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
791 return err;
792 }
793
794 return register_netdev(netdev);
795 }
796
/* Unregister the netdev from the stack; the structure itself is freed
 * separately by fbnic_netdev_free().
 */
void fbnic_netdev_unregister(struct net_device *netdev)
{
	unregister_netdev(netdev);
}
801