xref: /linux/drivers/net/ethernet/brocade/bna/bnad_ethtool.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "cna.h"

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"

#include "bnad.h"

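/*
 * Number of per-queue counter strings emitted by bnad_get_strings() for
 * each active TxF, RxF, CQ, RxQ and TxQ.  These must stay in sync with
 * the sprintf() blocks in bnad_get_strings() so that
 * bnad_get_stats_count_locked() and bnad_get_ethtool_stats() agree on
 * the buffer layout.
 */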
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5

static const char *bnad_net_stats_strings[] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",
	"rx_length_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"tx_fifo_errors",

	"netif_queue_stop",
	"netif_queue_wakeup",
	"netif_queue_stopped",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"tx_skb_too_short",
	"tx_skb_stopping",
	"tx_skb_max_vectors",
	"tx_skb_mss_too_long",
	"tx_skb_tso_too_short",
	"tx_skb_tso_prepare",
	"tx_skb_non_tso_too_long",
	"tx_skb_tcp_hdr",
	"tx_skb_udp_hdr",
	"tx_skb_csum_err",
	"tx_skb_headlen_too_long",
	"tx_skb_headlen_zero",
	"tx_skb_frag_zero",
	"tx_skb_len_mismatch",
	"tx_skb_map_failed",
	"hw_stats_updates",
	"netif_rx_dropped",

	"link_toggle",
	"cee_toggle",

	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",
	"rxbuf_map_failed",

	"mac_stats_clr_cnt",
	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	"mac_tx_multiple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	"rlb_rad_rx_frames",
	"rlb_rad_rx_octets",
	"rlb_rad_rx_vlan_frames",
	"rlb_rad_rx_ucast",
	"rlb_rad_rx_ucast_octets",
	"rlb_rad_rx_ucast_vlan",
	"rlb_rad_rx_mcast",
	"rlb_rad_rx_mcast_octets",
	"rlb_rad_rx_mcast_vlan",
	"rlb_rad_rx_bcast",
	"rlb_rad_rx_bcast_octets",
	"rlb_rad_rx_bcast_vlan",
	"rlb_rad_rx_drops",

	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};

#define BNAD_ETHTOOL_STATS_NUM	ARRAY_SIZE(bnad_net_stats_strings)

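/*
 * Link settings are fixed: 10G full duplex over fibre with
 * autonegotiation disabled.  Speed and duplex are reported as UNKNOWN
 * while the carrier is down.
 */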
static int
bnad_get_link_ksettings(struct net_device *netdev,
			struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = SUPPORTED_10000baseT_Full;
	advertising = ADVERTISED_10000baseT_Full;
	cmd->base.autoneg = AUTONEG_DISABLE;
	supported |= SUPPORTED_FIBRE;
	advertising |= ADVERTISED_FIBRE;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;

	if (netif_carrier_ok(netdev)) {
		cmd->base.speed = SPEED_10000;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

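/*
 * Only the one supported mode (10G full duplex, no autoneg) is
 * accepted; every other request is rejected with -EOPNOTSUPP.
 */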
static int
bnad_set_link_ksettings(struct net_device *netdev,
			const struct ethtool_link_ksettings *cmd)
{
	/* Only the fixed 10G full duplex setting is supported */
	if (cmd->base.autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	if ((cmd->base.speed == SPEED_10000) &&
	    (cmd->base.duplex == DUPLEX_FULL))
		return 0;

	return -EOPNOTSUPP;
}

static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version));
		kfree(ioc_attr);
	}

	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
		sizeof(drvinfo->bus_info));
}

static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
	wolinfo->supported = 0;
	wolinfo->wolopts = 0;
}

static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* Lock required to access bnad->cfg_flags */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	coalesce->use_adaptive_rx_coalesce =
		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

	return 0;
}

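/*
 * Validate the requested timeouts against the BFI coalescing range,
 * then switch dynamic interrupt moderation (DIM) on or off and push
 * the new Tx/Rx coalescing values to the hardware.
 */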
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int to_del = 0;

	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * No need to store rx_coalesce_usecs here: every time DIM is
	 * disabled we can get it back from the stack.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			/*
			 * BNAD_CF_DIM_ENABLED was just cleared above, so
			 * only the timer-running bit decides whether the
			 * DIM timer must be deleted.
			 */
			if (test_bit(BNAD_RF_DIM_TIMER_RUNNING,
				     &bnad->run_flags)) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
					  &bnad->run_flags);
				to_del = 1;
			}
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			if (to_del)
				del_timer_sync(&bnad->dim_timer);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;

		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);
	}

	/* Add Tx Inter-pkt DMA count? */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

static void
bnad_get_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
	ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;

	ringparam->rx_pending = bnad->rxq_depth;
	ringparam->tx_pending = bnad->txq_depth;
}

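/*
 * Ring sizes must be powers of two within the BNAD_MIN_Q_DEPTH..
 * BNAD_MAX_{RX,TX}Q_DEPTH range.  Resizing tears down and recreates
 * the affected Tx/Rx objects, then restores the Rx configuration
 * (VLANs, default bcast entry, MAC address and Rx mode).
 */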
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
	    !is_power_of_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
	    !is_power_of_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_destroy_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}

		if (!err && bnad->rx_info[0].rx) {
			/* restore rx configuration */
			bnad_restore_vlans(bnad, 0);
			bnad_enable_default_bcast(bnad);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
					     BNAD_CF_PROMISC);
			bnad_set_rx_mode(netdev);
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_destroy_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}

static int
bnad_set_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	if (pauseparam->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
		pause_config.rx_pause = pauseparam->rx_pause;
		pause_config.tx_pause = pauseparam->tx_pause;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_enet_pause_config(&bnad->bna.enet, &pause_config);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

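/*
 * Emit the stat names in exactly the order that
 * bnad_get_ethtool_stats() fills the values: fixed driver/MAC strings
 * first, then per-TxF/RxF strings for each rid set in the rid masks,
 * then per-CQ, per-RxQ (including the second rcb, if any) and per-TxQ
 * strings.
 */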
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, q_num;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
			BUG_ON(strlen(bnad_net_stats_strings[i]) >=
			       ETH_GSTRING_LEN);
			strncpy(string, bnad_net_stats_strings[i],
				ETH_GSTRING_LEN);
			string += ETH_GSTRING_LEN;
		}
		bmap = bna_tx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "txf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_errors", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_mac_sa", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		bmap = bna_rx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "rxf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_frame_drops", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "cq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_hw_producer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_intr", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_schedule", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_keep_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_complete", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "rxq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_packets_with_error",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_allocbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_mapbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					sprintf(string, "rxq%d_packets", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_bytes", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string,
						"rxq%d_packets_with_error",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_allocbuf_failed",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_mapbuf_failed",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_producer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_consumer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					q_num++;
				}
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			for (j = 0; j < bnad->num_txq_per_tx; j++) {
				sprintf(string, "txq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_hw_consumer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		break;

	default:
		break;
	}

	mutex_unlock(&bnad->conf_mutex);
}

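/*
 * Count the stats that bnad_get_strings() names: the fixed set plus
 * the per-TxF/RxF, per-CQ, per-RxQ and per-TxQ counters for whatever
 * is currently active.
 */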
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
	u32 bmap;

	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			txf_active_num++;
		bmap >>= 1;
	}
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			rxf_active_num++;
		bmap >>= 1;
	}
	count = BNAD_ETHTOOL_STATS_NUM +
		txf_active_num * BNAD_NUM_TXF_COUNTERS +
		rxf_active_num * BNAD_NUM_RXF_COUNTERS;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
				count += BNAD_NUM_RXQ_COUNTERS;
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
	}
	return count;
}

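/*
 * Append the per-CQ, per-RxQ and per-TxQ counters to @buf starting at
 * index @bi, in the same order as the strings above.  Returns the
 * updated buffer index.
 */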
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
			    bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}

static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi = 0;
	unsigned long flags;
	struct rtnl_link_stats64 net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * Take bna_lock to sync reads from bna_stats, which is written
	 * under the same lock.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);

	memset(&net_stats64, 0, sizeof(net_stats64));
	bnad_netdev_qstats_fill(bnad, &net_stats64);
	bnad_netdev_hwstats_fill(bnad, &net_stats64);

	buf[bi++] = net_stats64.rx_packets;
	buf[bi++] = net_stats64.tx_packets;
	buf[bi++] = net_stats64.rx_bytes;
	buf[bi++] = net_stats64.tx_bytes;
	buf[bi++] = net_stats64.rx_errors;
	buf[bi++] = net_stats64.tx_errors;
	buf[bi++] = net_stats64.rx_dropped;
	buf[bi++] = net_stats64.tx_dropped;
	buf[bi++] = net_stats64.multicast;
	buf[bi++] = net_stats64.collisions;
	buf[bi++] = net_stats64.rx_length_errors;
	buf[bi++] = net_stats64.rx_crc_errors;
	buf[bi++] = net_stats64.rx_frame_errors;
	buf[bi++] = net_stats64.tx_fifo_errors;

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *)&bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
		sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}

static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return bnad_get_stats_count_locked(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

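/*
 * Look up which flash partition contains @offset.  Returns the
 * partition type (0 on failure) and stores the partition's base
 * offset in @base_offset.
 */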
static u32
bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
				   u32 *base_offset)
{
	struct bfa_flash_attr *flash_attr;
	struct bnad_iocmd_comp fcomp;
	u32 i, flash_part = 0, ret;
	unsigned long flags = 0;

	flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
	if (!flash_attr)
		return 0;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
				    bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		kfree(flash_attr);
		return 0;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;

	/* Check for the flash type & base offset value */
	if (ret == BFA_STATUS_OK) {
		for (i = 0; i < flash_attr->npart; i++) {
			if (offset >= flash_attr->part[i].part_off &&
			    offset < (flash_attr->part[i].part_off +
				      flash_attr->part[i].part_size)) {
				flash_part = flash_attr->part[i].part_type;
				*base_offset = flash_attr->part[i].part_off;
				break;
			}
		}
	}
	kfree(flash_attr);
	return flash_part;
}

static int
bnad_get_eeprom_len(struct net_device *netdev)
{
	return BFA_TOTAL_FLASH_SIZE;
}

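/*
 * EEPROM access is backed by the adapter flash: reads and writes are
 * routed to the partition that contains the requested offset.
 */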
static int
bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* Fill the magic value */
	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);

	/* Query the flash partition based on the offset */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
				     bnad->id, bytes, eeprom->len,
				     eeprom->offset - base_offset,
				     bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}

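/*
 * Flash writes additionally require the caller to present the magic
 * (PCI vendor/device) value returned by bnad_get_eeprom().
 */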
static int
bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* Check if the flash update request is valid */
	if (eeprom->magic != (bnad->pcidev->vendor |
			     (bnad->pcidev->device << 16)))
		return -EINVAL;

	/* Query the flash partition based on the offset */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
				       bnad->id, bytes, eeprom->len,
				       eeprom->offset - base_offset,
				       bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}

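/*
 * ethtool -f: load the named firmware image and write it into the
 * FWIMG flash partition, waiting for the asynchronous flash update to
 * complete.
 */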
static int
bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	const struct firmware *fw;
	int ret = 0;

	ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
	if (ret) {
		netdev_err(netdev, "can't load firmware %s\n", eflash->data);
		goto out;
	}

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irq(&bnad->bna_lock);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
				       bnad->id, (u8 *)fw->data, fw->size, 0,
				       bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		netdev_warn(netdev, "flash update failed with err=%d\n", ret);
		ret = -EIO;
		spin_unlock_irq(&bnad->bna_lock);
		goto out;
	}

	spin_unlock_irq(&bnad->bna_lock);
	wait_for_completion(&fcomp.comp);
	if (fcomp.comp_status != BFA_STATUS_OK) {
		ret = -EIO;
		netdev_warn(netdev,
			    "firmware image update failed with err=%d\n",
			    fcomp.comp_status);
	}
out:
	release_firmware(fw);
	return ret;
}

static const struct ethtool_ops bnad_ethtool_ops = {
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
	.get_eeprom_len = bnad_get_eeprom_len,
	.get_eeprom = bnad_get_eeprom,
	.set_eeprom = bnad_set_eeprom,
	.flash_device = bnad_flash_device,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = bnad_get_link_ksettings,
	.set_link_ksettings = bnad_set_link_ksettings,
};

void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &bnad_ethtool_ops;
}