xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c (revision a0285236ab93fdfdd1008afaa04561d142d6c276)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/ipv6.h>
6 #include <linux/types.h>
7 #include <net/netdev_queues.h>
8 
9 #include "fbnic.h"
10 #include "fbnic_netdev.h"
11 #include "fbnic_txrx.h"
12 
/* Bring up the device datapath and take ownership from the firmware.
 *
 * Allocates NAPI vectors and ring resources, binds them to the netdev
 * queue configuration, then notifies the FW that the host owns the
 * device before starting timekeeping, the FW heartbeat, and the PCS
 * IRQ. On failure, everything completed so far is unwound in reverse
 * order through the goto ladder at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
int __fbnic_open(struct fbnic_net *fbn)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	err = fbnic_alloc_napi_vectors(fbn);
	if (err)
		return err;

	err = fbnic_alloc_resources(fbn);
	if (err)
		goto free_napi_vectors;

	err = fbnic_set_netif_queues(fbn);
	if (err)
		goto free_resources;

	/* Send ownership message and flush to verify FW has seen it */
	err = fbnic_fw_xmit_ownership_msg(fbd, true);
	if (err) {
		dev_warn(fbd->dev,
			 "Error %d sending host ownership message to the firmware\n",
			 err);
		goto free_resources;
	}

	err = fbnic_time_start(fbn);
	if (err)
		goto release_ownership;

	err = fbnic_fw_init_heartbeat(fbd, false);
	if (err)
		goto time_stop;

	err = fbnic_pcs_irq_enable(fbd);
	if (err)
		goto time_stop;
	/* Pull the BMC config and initialize the RPC */
	fbnic_bmc_rpc_init(fbd);
	fbnic_rss_reinit(fbd, fbn);

	return 0;
time_stop:
	fbnic_time_stop(fbn);
release_ownership:
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
free_resources:
	fbnic_free_resources(fbn);
free_napi_vectors:
	fbnic_free_napi_vectors(fbn);
	return err;
}
65 
66 static int fbnic_open(struct net_device *netdev)
67 {
68 	struct fbnic_net *fbn = netdev_priv(netdev);
69 	int err;
70 
71 	fbnic_napi_name_irqs(fbn->fbd);
72 
73 	err = __fbnic_open(fbn);
74 	if (!err)
75 		fbnic_up(fbn);
76 
77 	return err;
78 }
79 
/* ndo_stop handler: quiesce traffic and tear down the datapath.
 * The teardown order mirrors __fbnic_open()/fbnic_up() in reverse.
 */
static int fbnic_stop(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	/* Stop traffic first, then the PCS link interrupt */
	fbnic_down(fbn);
	fbnic_pcs_irq_disable(fbn->fbd);

	/* Halt timekeeping and hand ownership back to the firmware */
	fbnic_time_stop(fbn);
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);

	/* Release queue config, ring resources, and NAPI vectors */
	fbnic_reset_netif_queues(fbn);
	fbnic_free_resources(fbn);
	fbnic_free_napi_vectors(fbn);

	return 0;
}
96 
97 static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
98 {
99 	struct fbnic_net *fbn = netdev_priv(netdev);
100 	struct fbnic_mac_addr *avail_addr;
101 
102 	if (WARN_ON(!is_valid_ether_addr(addr)))
103 		return -EADDRNOTAVAIL;
104 
105 	avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
106 	if (!avail_addr)
107 		return -ENOSPC;
108 
109 	/* Add type flag indicating this address is in use by the host */
110 	set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);
111 
112 	return 0;
113 }
114 
115 static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
116 {
117 	struct fbnic_net *fbn = netdev_priv(netdev);
118 	struct fbnic_dev *fbd = fbn->fbd;
119 	int i, ret;
120 
121 	/* Scan from middle of list to bottom, filling bottom up.
122 	 * Skip the first entry which is reserved for dev_addr and
123 	 * leave the last entry to use for promiscuous filtering.
124 	 */
125 	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
126 	     i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
127 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
128 
129 		if (!ether_addr_equal(mac_addr->value.addr8, addr))
130 			continue;
131 
132 		ret = __fbnic_uc_unsync(mac_addr);
133 	}
134 
135 	return ret;
136 }
137 
138 static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
139 {
140 	struct fbnic_net *fbn = netdev_priv(netdev);
141 	struct fbnic_mac_addr *avail_addr;
142 
143 	if (WARN_ON(!is_multicast_ether_addr(addr)))
144 		return -EADDRNOTAVAIL;
145 
146 	avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
147 	if (!avail_addr)
148 		return -ENOSPC;
149 
150 	/* Add type flag indicating this address is in use by the host */
151 	set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);
152 
153 	return 0;
154 }
155 
156 static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
157 {
158 	struct fbnic_net *fbn = netdev_priv(netdev);
159 	struct fbnic_dev *fbd = fbn->fbd;
160 	int i, ret;
161 
162 	/* Scan from middle of list to top, filling top down.
163 	 * Skip over the address reserved for the BMC MAC and
164 	 * exclude index 0 as that belongs to the broadcast address
165 	 */
166 	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
167 	     --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
168 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
169 
170 		if (!ether_addr_equal(mac_addr->value.addr8, addr))
171 			continue;
172 
173 		ret = __fbnic_mc_unsync(mac_addr);
174 	}
175 
176 	return ret;
177 }
178 
/* Rebuild the MACDA TCAM to match the netdev's current Rx filter state.
 *
 * Refreshes the dev_addr and broadcast entries, syncs the kernel's
 * unicast/multicast lists into the table (falling back to promiscuous
 * or all-multi on overflow), programs the shared promiscuous slot, and
 * finally writes the resulting rules out to hardware. Must be called
 * with a consistent view of netdev->flags and the address lists.
 */
void __fbnic_set_rx_mode(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	bool uc_promisc = false, mc_promisc = false;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_mac_addr *mac_addr;
	int err;

	/* Populate host address from dev_addr */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
	if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
	    mac_addr->state != FBNIC_TCAM_S_VALID) {
		ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
		mac_addr->state = FBNIC_TCAM_S_UPDATE;
		set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
	}

	/* Populate broadcast address if broadcast is enabled */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
	if (netdev->flags & IFF_BROADCAST) {
		if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_broadcast_addr(mac_addr->value.addr8);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
		set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
	}

	/* Synchronize unicast and multicast address lists.
	 * A sync failure with -ENOSPC means the TCAM overflowed; fall
	 * back to promiscuous matching for that address class instead.
	 */
	err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
	if (err == -ENOSPC)
		uc_promisc = true;
	err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
	if (err == -ENOSPC)
		mc_promisc = true;

	/* Honor explicit promisc/allmulti flags; unicast promisc implies
	 * multicast promisc as it matches a superset of frames.
	 */
	uc_promisc |= !!(netdev->flags & IFF_PROMISC);
	mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;

	/* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
	if (uc_promisc) {
		if (!is_zero_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			set_bit(FBNIC_MAC_ADDR_T_PROMISC,
				mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mc_promisc &&
		   (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
		/* We have to add a special handler for multicast as the
		 * BMC may have an all-multi rule already in place. As such
		 * adding a rule ourselves won't do any good so we will have
		 * to modify the rules for the ALL MULTI below if the BMC
		 * already has the rule in place.
		 */
		if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			/* Value/mask pair matching any address with the
			 * multicast bit set: only bit 0 of byte 0 is compared.
			 */
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			mac_addr->value.addr8[0] ^= 1;
			mac_addr->mask.addr8[0] ^= 1;
			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		/* No promiscuous matching needed: release the host's claim,
		 * but keep the entry alive if the BMC still references it.
		 */
		if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
		} else {
			mac_addr->state = FBNIC_TCAM_S_DELETE;
		}
	}

	/* Add rules for BMC all multicast if it is enabled */
	fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);

	/* Sift out any unshared BMC rules and place them in BMC only section */
	fbnic_sift_macda(fbd);

	/* Write updates to hardware */
	fbnic_write_rules(fbd);
	fbnic_write_macda(fbd);
	fbnic_write_tce_tcam(fbd);
}
275 
/* ndo_set_rx_mode handler: defer to __fbnic_set_rx_mode when running */
static void fbnic_set_rx_mode(struct net_device *netdev)
{
	/* Hardware is only programmed while the interface is up */
	if (!netif_running(netdev))
		return;

	__fbnic_set_rx_mode(netdev);
}
282 
283 static int fbnic_set_mac(struct net_device *netdev, void *p)
284 {
285 	struct sockaddr *addr = p;
286 
287 	if (!is_valid_ether_addr(addr->sa_data))
288 		return -EADDRNOTAVAIL;
289 
290 	eth_hw_addr_set(netdev, addr->sa_data);
291 
292 	fbnic_set_rx_mode(netdev);
293 
294 	return 0;
295 }
296 
297 void fbnic_clear_rx_mode(struct net_device *netdev)
298 {
299 	struct fbnic_net *fbn = netdev_priv(netdev);
300 	struct fbnic_dev *fbd = fbn->fbd;
301 	int idx;
302 
303 	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
304 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
305 
306 		if (mac_addr->state != FBNIC_TCAM_S_VALID)
307 			continue;
308 
309 		bitmap_clear(mac_addr->act_tcam,
310 			     FBNIC_MAC_ADDR_T_HOST_START,
311 			     FBNIC_MAC_ADDR_T_HOST_LEN);
312 
313 		if (bitmap_empty(mac_addr->act_tcam,
314 				 FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
315 			mac_addr->state = FBNIC_TCAM_S_DELETE;
316 	}
317 
318 	/* Write updates to hardware */
319 	fbnic_write_macda(fbd);
320 
321 	__dev_uc_unsync(netdev, NULL);
322 	__dev_mc_unsync(netdev, NULL);
323 }
324 
/* ndo_hwtstamp_get handler: report the stored timestamp configuration */
static int fbnic_hwtstamp_get(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	*config = fbn->hwtstamp_config;

	return 0;
}
334 
/* ndo_hwtstamp_set handler: apply a new hardware timestamp configuration.
 *
 * Widens the requested Rx filter to the nearest class this hardware can
 * match, reprograms the RPC rules if the filter changed while the
 * interface is running, and reports FILTER_SOME for anything narrower
 * than FILTER_ALL since the hardware match is inexact.
 *
 * Return: 0 on success, -EOPNOTSUPP for a non-netdev timestamp source,
 * -ERANGE for an unsupported filter.
 */
static int fbnic_hwtstamp_set(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int old_rx_filter;

	if (config->source != HWTSTAMP_SOURCE_NETDEV)
		return -EOPNOTSUPP;

	/* Nothing to do if the requested config matches what is stored */
	if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
		return 0;

	/* Upscale the filters */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		break;
	case HWTSTAMP_FILTER_NTP_ALL:
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* Configure */
	old_rx_filter = fbn->hwtstamp_config.rx_filter;
	memcpy(&fbn->hwtstamp_config, config, sizeof(*config));

	/* A changed filter needs the RPC rules reprogrammed, but only
	 * while the interface is up and the hardware is in use.
	 */
	if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	/* Save / report back filter configuration
	 * Note that our filter configuration is inexact. Instead of
	 * filtering for a specific UDP port or L2 Ethertype we are
	 * filtering in all UDP or all non-IP packets for timestamping. So
	 * if anything other than FILTER_ALL is requested we report
	 * FILTER_SOME indicating that we will be timestamping a few
	 * additional packets.
	 */
	if (config->rx_filter > HWTSTAMP_FILTER_ALL)
		config->rx_filter = HWTSTAMP_FILTER_SOME;

	return 0;
}
402 
403 static void fbnic_get_stats64(struct net_device *dev,
404 			      struct rtnl_link_stats64 *stats64)
405 {
406 	u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
407 	u64 tx_bytes, tx_packets, tx_dropped = 0;
408 	struct fbnic_net *fbn = netdev_priv(dev);
409 	struct fbnic_dev *fbd = fbn->fbd;
410 	struct fbnic_queue_stats *stats;
411 	u64 rx_over = 0, rx_missed = 0;
412 	unsigned int start, i;
413 
414 	fbnic_get_hw_stats(fbd);
415 
416 	stats = &fbn->tx_stats;
417 
418 	tx_bytes = stats->bytes;
419 	tx_packets = stats->packets;
420 	tx_dropped = stats->dropped;
421 
422 	stats64->tx_bytes = tx_bytes;
423 	stats64->tx_packets = tx_packets;
424 	stats64->tx_dropped = tx_dropped;
425 
426 	/* Record drops from Tx HW Datapath */
427 	tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
428 		      fbd->hw_stats.tti.frame_drop.frames.value +
429 		      fbd->hw_stats.tti.tbi_drop.frames.value +
430 		      fbd->hw_stats.tmi.drop.frames.value;
431 
432 	for (i = 0; i < fbn->num_tx_queues; i++) {
433 		struct fbnic_ring *txr = fbn->tx[i];
434 
435 		if (!txr)
436 			continue;
437 
438 		stats = &txr->stats;
439 		do {
440 			start = u64_stats_fetch_begin(&stats->syncp);
441 			tx_bytes = stats->bytes;
442 			tx_packets = stats->packets;
443 			tx_dropped = stats->dropped;
444 		} while (u64_stats_fetch_retry(&stats->syncp, start));
445 
446 		stats64->tx_bytes += tx_bytes;
447 		stats64->tx_packets += tx_packets;
448 		stats64->tx_dropped += tx_dropped;
449 	}
450 
451 	stats = &fbn->rx_stats;
452 
453 	rx_bytes = stats->bytes;
454 	rx_packets = stats->packets;
455 	rx_dropped = stats->dropped;
456 
457 	spin_lock(&fbd->hw_stats_lock);
458 	/* Record drops for the host FIFOs.
459 	 * 4: network to Host,	6: BMC to Host
460 	 * Exclude the BMC and MC FIFOs as those stats may contain drops
461 	 * due to unrelated items such as TCAM misses. They are still
462 	 * accessible through the ethtool stats.
463 	 */
464 	i = FBNIC_RXB_FIFO_HOST;
465 	rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
466 	i = FBNIC_RXB_FIFO_BMC_TO_HOST;
467 	rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
468 
469 	for (i = 0; i < fbd->max_num_queues; i++) {
470 		/* Report packets dropped due to CQ/BDQ being full/empty */
471 		rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
472 		rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
473 
474 		/* Report packets with errors */
475 		rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
476 	}
477 	spin_unlock(&fbd->hw_stats_lock);
478 
479 	stats64->rx_bytes = rx_bytes;
480 	stats64->rx_packets = rx_packets;
481 	stats64->rx_dropped = rx_dropped;
482 	stats64->rx_over_errors = rx_over;
483 	stats64->rx_errors = rx_errors;
484 	stats64->rx_missed_errors = rx_missed;
485 
486 	for (i = 0; i < fbn->num_rx_queues; i++) {
487 		struct fbnic_ring *rxr = fbn->rx[i];
488 
489 		if (!rxr)
490 			continue;
491 
492 		stats = &rxr->stats;
493 		do {
494 			start = u64_stats_fetch_begin(&stats->syncp);
495 			rx_bytes = stats->bytes;
496 			rx_packets = stats->packets;
497 			rx_dropped = stats->dropped;
498 		} while (u64_stats_fetch_retry(&stats->syncp, start));
499 
500 		stats64->rx_bytes += rx_bytes;
501 		stats64->rx_packets += rx_packets;
502 		stats64->rx_dropped += rx_dropped;
503 	}
504 }
505 
/* Netdev entry points exposed to the network stack */
static const struct net_device_ops fbnic_netdev_ops = {
	.ndo_open		= fbnic_open,
	.ndo_stop		= fbnic_stop,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fbnic_xmit_frame,
	.ndo_features_check	= fbnic_features_check,
	.ndo_set_mac_address	= fbnic_set_mac,
	.ndo_set_rx_mode	= fbnic_set_rx_mode,
	.ndo_get_stats64	= fbnic_get_stats64,
	.ndo_hwtstamp_get	= fbnic_hwtstamp_get,
	.ndo_hwtstamp_set	= fbnic_hwtstamp_set,
};
518 
519 static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
520 				     struct netdev_queue_stats_rx *rx)
521 {
522 	struct fbnic_net *fbn = netdev_priv(dev);
523 	struct fbnic_ring *rxr = fbn->rx[idx];
524 	struct fbnic_dev *fbd = fbn->fbd;
525 	struct fbnic_queue_stats *stats;
526 	u64 bytes, packets, alloc_fail;
527 	u64 csum_complete, csum_none;
528 	unsigned int start;
529 
530 	if (!rxr)
531 		return;
532 
533 	stats = &rxr->stats;
534 	do {
535 		start = u64_stats_fetch_begin(&stats->syncp);
536 		bytes = stats->bytes;
537 		packets = stats->packets;
538 		alloc_fail = stats->rx.alloc_failed;
539 		csum_complete = stats->rx.csum_complete;
540 		csum_none = stats->rx.csum_none;
541 	} while (u64_stats_fetch_retry(&stats->syncp, start));
542 
543 	rx->bytes = bytes;
544 	rx->packets = packets;
545 	rx->alloc_fail = alloc_fail;
546 	rx->csum_complete = csum_complete;
547 	rx->csum_none = csum_none;
548 
549 	fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
550 
551 	spin_lock(&fbd->hw_stats_lock);
552 	rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
553 			       fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
554 	rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
555 		       rx->hw_drop_overruns;
556 	spin_unlock(&fbd->hw_stats_lock);
557 }
558 
559 static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
560 				     struct netdev_queue_stats_tx *tx)
561 {
562 	struct fbnic_net *fbn = netdev_priv(dev);
563 	struct fbnic_ring *txr = fbn->tx[idx];
564 	struct fbnic_queue_stats *stats;
565 	u64 stop, wake, csum, lso;
566 	unsigned int start;
567 	u64 bytes, packets;
568 
569 	if (!txr)
570 		return;
571 
572 	stats = &txr->stats;
573 	do {
574 		start = u64_stats_fetch_begin(&stats->syncp);
575 		bytes = stats->bytes;
576 		packets = stats->packets;
577 		csum = stats->twq.csum_partial;
578 		lso = stats->twq.lso;
579 		stop = stats->twq.stop;
580 		wake = stats->twq.wake;
581 	} while (u64_stats_fetch_retry(&stats->syncp, start));
582 
583 	tx->bytes = bytes;
584 	tx->packets = packets;
585 	tx->needs_csum = csum + lso;
586 	tx->hw_gso_wire_packets = lso;
587 	tx->stop = stop;
588 	tx->wake = wake;
589 }
590 
591 static void fbnic_get_base_stats(struct net_device *dev,
592 				 struct netdev_queue_stats_rx *rx,
593 				 struct netdev_queue_stats_tx *tx)
594 {
595 	struct fbnic_net *fbn = netdev_priv(dev);
596 
597 	tx->bytes = fbn->tx_stats.bytes;
598 	tx->packets = fbn->tx_stats.packets;
599 	tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
600 	tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
601 	tx->stop = fbn->tx_stats.twq.stop;
602 	tx->wake = fbn->tx_stats.twq.wake;
603 
604 	rx->bytes = fbn->rx_stats.bytes;
605 	rx->packets = fbn->rx_stats.packets;
606 	rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
607 	rx->csum_complete = fbn->rx_stats.rx.csum_complete;
608 	rx->csum_none = fbn->rx_stats.rx.csum_none;
609 }
610 
/* Per-queue statistics entry points for the netdev qstats API */
static const struct netdev_stat_ops fbnic_stat_ops = {
	.get_queue_stats_rx	= fbnic_get_queue_stats_rx,
	.get_queue_stats_tx	= fbnic_get_queue_stats_tx,
	.get_base_stats		= fbnic_get_base_stats,
};
616 
617 void fbnic_reset_queues(struct fbnic_net *fbn,
618 			unsigned int tx, unsigned int rx)
619 {
620 	struct fbnic_dev *fbd = fbn->fbd;
621 	unsigned int max_napis;
622 
623 	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
624 
625 	tx = min(tx, max_napis);
626 	fbn->num_tx_queues = tx;
627 
628 	rx = min(rx, max_napis);
629 	fbn->num_rx_queues = rx;
630 
631 	fbn->num_napi = max(tx, rx);
632 }
633 
/**
 * fbnic_netdev_free - Free the netdev associated with fbnic
 * @fbd: Driver specific structure to free netdev from
 *
 * Destroy the phylink instance if one was created, release the netdev
 * allocated by fbnic_netdev_alloc(), and clear the back-pointer so the
 * device is not torn down twice.
 **/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);

	if (fbn->phylink)
		phylink_destroy(fbn->phylink);

	free_netdev(fbd->netdev);
	fbd->netdev = NULL;
}
651 
/**
 * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
 * @fbd: Driver specific structure to associate netdev with
 *
 * Allocate and initialize the netdev and netdev private structure. Bind
 * together the hardware, netdev, and pci data structures.
 *
 *  Return: Pointer to net_device on success, NULL on failure
 **/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
	struct net_device *netdev;
	struct fbnic_net *fbn;
	int default_queues;

	netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
	if (!netdev)
		return NULL;

	SET_NETDEV_DEV(netdev, fbd->dev);
	fbd->netdev = netdev;

	netdev->netdev_ops = &fbnic_netdev_ops;
	netdev->stat_ops = &fbnic_stat_ops;

	fbnic_set_ethtool_ops(netdev);

	fbn = netdev_priv(netdev);

	/* Cross-link the private structure with the hardware and netdev */
	fbn->netdev = netdev;
	fbn->fbd = fbd;

	/* Default ring sizes and interrupt moderation settings */
	fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
	fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
	fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
	fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;

	fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
	fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
	fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;

	/* Size the queue counts from the RSS default, capped by hardware */
	default_queues = netif_get_num_default_rss_queues();
	if (default_queues > fbd->max_num_queues)
		default_queues = fbd->max_num_queues;

	fbnic_reset_queues(fbn, default_queues, default_queues);

	fbnic_reset_indir_tbl(fbn);
	fbnic_rss_key_fill(fbn->rss_key);
	fbnic_rss_init_en_mask(fbn);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->gso_partial_features =
		NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM |
		NETIF_F_GSO_IPXIP4 |
		NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->features |=
		netdev->gso_partial_features |
		FBNIC_TUN_GSO_FEATURES |
		NETIF_F_RXHASH |
		NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM |
		NETIF_F_TSO |
		NETIF_F_TSO_ECN |
		NETIF_F_TSO6 |
		NETIF_F_GSO_PARTIAL |
		NETIF_F_GSO_UDP_L4;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
	netdev->hw_enc_features |= netdev->features;
	netdev->features |= NETIF_F_NTUPLE;

	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

	/* TBD: This is workaround for BMC as phylink doesn't have support
	 * for leaving the link enabled if a BMC is present.
	 */
	netdev->ethtool->wol_enabled = true;

	fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
	fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
	netif_carrier_off(netdev);

	netif_tx_stop_all_queues(netdev);

	/* Phylink failure leaves nothing usable; unwind the allocation */
	if (fbnic_phylink_init(netdev)) {
		fbnic_netdev_free(fbd);
		return NULL;
	}

	return netdev;
}
751 
752 static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
753 {
754 	addr[0] = (dsn >> 56) & 0xFF;
755 	addr[1] = (dsn >> 48) & 0xFF;
756 	addr[2] = (dsn >> 40) & 0xFF;
757 	addr[3] = (dsn >> 16) & 0xFF;
758 	addr[4] = (dsn >> 8) & 0xFF;
759 	addr[5] = dsn & 0xFF;
760 
761 	return is_valid_ether_addr(addr) ? 0 : -EINVAL;
762 }
763 
764 /**
765  * fbnic_netdev_register - Initialize general software structures
766  * @netdev: Netdev containing structure to initialize and register
767  *
768  * Initialize the MAC address for the netdev and register it.
769  *
770  *  Return: 0 on success, negative on failure
771  **/
772 int fbnic_netdev_register(struct net_device *netdev)
773 {
774 	struct fbnic_net *fbn = netdev_priv(netdev);
775 	struct fbnic_dev *fbd = fbn->fbd;
776 	u64 dsn = fbd->dsn;
777 	u8 addr[ETH_ALEN];
778 	int err;
779 
780 	err = fbnic_dsn_to_mac_addr(dsn, addr);
781 	if (!err) {
782 		ether_addr_copy(netdev->perm_addr, addr);
783 		eth_hw_addr_set(netdev, addr);
784 	} else {
785 		/* A randomly assigned MAC address will cause provisioning
786 		 * issues so instead just fail to spawn the netdev and
787 		 * avoid any confusion.
788 		 */
789 		dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
790 		return err;
791 	}
792 
793 	return register_netdev(netdev);
794 }
795 
/* Unregister the netdev from the stack; the structure itself is freed
 * later by fbnic_netdev_free().
 */
void fbnic_netdev_unregister(struct net_device *netdev)
{
	unregister_netdev(netdev);
}
800