xref: /linux/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  */
9 
10 #include <linux/ctype.h>
11 #include <linux/stringify.h>
12 #include <linux/ethtool.h>
13 #include <linux/interrupt.h>
14 #include <linux/pci.h>
15 #include <linux/etherdevice.h>
16 #include <linux/crc32.h>
17 #include <linux/firmware.h>
18 #include "bnxt_hsi.h"
19 #include "bnxt.h"
20 #include "bnxt_ethtool.h"
21 #include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
22 #include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
23 #define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
24 
25 static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
26 
27 static u32 bnxt_get_msglevel(struct net_device *dev)
28 {
29 	struct bnxt *bp = netdev_priv(dev);
30 
31 	return bp->msg_enable;
32 }
33 
34 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
35 {
36 	struct bnxt *bp = netdev_priv(dev);
37 
38 	bp->msg_enable = value;
39 }
40 
41 static int bnxt_get_coalesce(struct net_device *dev,
42 			     struct ethtool_coalesce *coal)
43 {
44 	struct bnxt *bp = netdev_priv(dev);
45 
46 	memset(coal, 0, sizeof(*coal));
47 
48 	coal->rx_coalesce_usecs = bp->rx_coal_ticks;
49 	/* 2 completion records per rx packet */
50 	coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
51 	coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
52 	coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
53 
54 	coal->tx_coalesce_usecs = bp->tx_coal_ticks;
55 	coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
56 	coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
57 	coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
58 
59 	return 0;
60 }
61 
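/* Note: the hardware counts completion records rather than packets, and each
 * rx packet produces two completion records.  So a command such as
 *   ethtool -C <iface> rx-usecs 25 rx-frames 16
 * (interface name and values illustrative) ends up as rx_coal_ticks = 25 and
 * rx_coal_bufs = 32 below; bnxt_get_coalesce() above halves the value again
 * when reporting it back.
 */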
62 static int bnxt_set_coalesce(struct net_device *dev,
63 			     struct ethtool_coalesce *coal)
64 {
65 	struct bnxt *bp = netdev_priv(dev);
66 	int rc = 0;
67 
68 	bp->rx_coal_ticks = coal->rx_coalesce_usecs;
69 	/* 2 completion records per rx packet */
70 	bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
71 	bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
72 	bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
73 
74 	bp->tx_coal_ticks = coal->tx_coalesce_usecs;
75 	bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
76 	bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
77 	bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
78 
79 	if (netif_running(dev))
80 		rc = bnxt_hwrm_set_coal(bp);
81 
82 	return rc;
83 }
84 
85 #define BNXT_NUM_STATS	21
86 
87 #define BNXT_RX_STATS_OFFSET(counter)	\
88 	(offsetof(struct rx_port_stats, counter) / 8)
89 
90 #define BNXT_RX_STATS_ENTRY(counter)	\
91 	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
92 
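/* The port statistics block is DMAed by firmware as struct rx_port_stats
 * followed (after what appears to be 512 bytes of reserved space) by
 * struct tx_port_stats.  Offsets are expressed in 64-bit counters, hence
 * the division by 8.
 */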
93 #define BNXT_TX_STATS_OFFSET(counter)			\
94 	((offsetof(struct tx_port_stats, counter) +	\
95 	  sizeof(struct rx_port_stats) + 512) / 8)
96 
97 #define BNXT_TX_STATS_ENTRY(counter)	\
98 	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
99 
100 static const struct {
101 	long offset;
102 	char string[ETH_GSTRING_LEN];
103 } bnxt_port_stats_arr[] = {
104 	BNXT_RX_STATS_ENTRY(rx_64b_frames),
105 	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
106 	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
107 	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
108 	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
109 	BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
110 	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
111 	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
112 	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
113 	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
114 	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
115 	BNXT_RX_STATS_ENTRY(rx_total_frames),
116 	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
117 	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
118 	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
119 	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
120 	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
121 	BNXT_RX_STATS_ENTRY(rx_pause_frames),
122 	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
123 	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
124 	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
125 	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
126 	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
127 	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
128 	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
129 	BNXT_RX_STATS_ENTRY(rx_good_frames),
130 	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
131 	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
132 	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
133 	BNXT_RX_STATS_ENTRY(rx_bytes),
134 	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
135 	BNXT_RX_STATS_ENTRY(rx_runt_frames),
136 
137 	BNXT_TX_STATS_ENTRY(tx_64b_frames),
138 	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
139 	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
140 	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
141 	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
142 	BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
143 	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
144 	BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
145 	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
146 	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
147 	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
148 	BNXT_TX_STATS_ENTRY(tx_good_frames),
149 	BNXT_TX_STATS_ENTRY(tx_total_frames),
150 	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
151 	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
152 	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
153 	BNXT_TX_STATS_ENTRY(tx_pause_frames),
154 	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
155 	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
156 	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
157 	BNXT_TX_STATS_ENTRY(tx_err),
158 	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
159 	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
160 	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
161 	BNXT_TX_STATS_ENTRY(tx_total_collisions),
162 	BNXT_TX_STATS_ENTRY(tx_bytes),
163 };
164 
165 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
166 
167 static int bnxt_get_sset_count(struct net_device *dev, int sset)
168 {
169 	struct bnxt *bp = netdev_priv(dev);
170 
171 	switch (sset) {
172 	case ETH_SS_STATS: {
173 		int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
174 
175 		if (bp->flags & BNXT_FLAG_PORT_STATS)
176 			num_stats += BNXT_NUM_PORT_STATS;
177 
178 		return num_stats;
179 	}
180 	default:
181 		return -EOPNOTSUPP;
182 	}
183 }
184 
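/* Layout reported to "ethtool -S": for each completion ring, the raw
 * ctx_hw_stats counters (converted from little endian) followed by
 * rx_l4_csum_errors, then the port counters from bnxt_port_stats_arr[]
 * when port statistics are supported.  The order must match the strings
 * emitted by bnxt_get_strings() below.
 */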
185 static void bnxt_get_ethtool_stats(struct net_device *dev,
186 				   struct ethtool_stats *stats, u64 *buf)
187 {
188 	u32 i, j = 0;
189 	struct bnxt *bp = netdev_priv(dev);
190 	u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
191 	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
192 
193 	memset(buf, 0, buf_size);
194 
195 	if (!bp->bnapi)
196 		return;
197 
198 	for (i = 0; i < bp->cp_nr_rings; i++) {
199 		struct bnxt_napi *bnapi = bp->bnapi[i];
200 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
201 		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
202 		int k;
203 
204 		for (k = 0; k < stat_fields; j++, k++)
205 			buf[j] = le64_to_cpu(hw_stats[k]);
206 		buf[j++] = cpr->rx_l4_csum_errors;
207 	}
208 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
209 		__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
210 
211 		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
212 			buf[j] = le64_to_cpu(*(port_stats +
213 					       bnxt_port_stats_arr[i].offset));
214 		}
215 	}
216 }
217 
218 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
219 {
220 	struct bnxt *bp = netdev_priv(dev);
221 	u32 i;
222 
223 	switch (stringset) {
224 	/* The number of strings must match BNXT_NUM_STATS defined above. */
225 	case ETH_SS_STATS:
226 		for (i = 0; i < bp->cp_nr_rings; i++) {
227 			sprintf(buf, "[%d]: rx_ucast_packets", i);
228 			buf += ETH_GSTRING_LEN;
229 			sprintf(buf, "[%d]: rx_mcast_packets", i);
230 			buf += ETH_GSTRING_LEN;
231 			sprintf(buf, "[%d]: rx_bcast_packets", i);
232 			buf += ETH_GSTRING_LEN;
233 			sprintf(buf, "[%d]: rx_discards", i);
234 			buf += ETH_GSTRING_LEN;
235 			sprintf(buf, "[%d]: rx_drops", i);
236 			buf += ETH_GSTRING_LEN;
237 			sprintf(buf, "[%d]: rx_ucast_bytes", i);
238 			buf += ETH_GSTRING_LEN;
239 			sprintf(buf, "[%d]: rx_mcast_bytes", i);
240 			buf += ETH_GSTRING_LEN;
241 			sprintf(buf, "[%d]: rx_bcast_bytes", i);
242 			buf += ETH_GSTRING_LEN;
243 			sprintf(buf, "[%d]: tx_ucast_packets", i);
244 			buf += ETH_GSTRING_LEN;
245 			sprintf(buf, "[%d]: tx_mcast_packets", i);
246 			buf += ETH_GSTRING_LEN;
247 			sprintf(buf, "[%d]: tx_bcast_packets", i);
248 			buf += ETH_GSTRING_LEN;
249 			sprintf(buf, "[%d]: tx_discards", i);
250 			buf += ETH_GSTRING_LEN;
251 			sprintf(buf, "[%d]: tx_drops", i);
252 			buf += ETH_GSTRING_LEN;
253 			sprintf(buf, "[%d]: tx_ucast_bytes", i);
254 			buf += ETH_GSTRING_LEN;
255 			sprintf(buf, "[%d]: tx_mcast_bytes", i);
256 			buf += ETH_GSTRING_LEN;
257 			sprintf(buf, "[%d]: tx_bcast_bytes", i);
258 			buf += ETH_GSTRING_LEN;
259 			sprintf(buf, "[%d]: tpa_packets", i);
260 			buf += ETH_GSTRING_LEN;
261 			sprintf(buf, "[%d]: tpa_bytes", i);
262 			buf += ETH_GSTRING_LEN;
263 			sprintf(buf, "[%d]: tpa_events", i);
264 			buf += ETH_GSTRING_LEN;
265 			sprintf(buf, "[%d]: tpa_aborts", i);
266 			buf += ETH_GSTRING_LEN;
267 			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
268 			buf += ETH_GSTRING_LEN;
269 		}
270 		if (bp->flags & BNXT_FLAG_PORT_STATS) {
271 			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
272 				strcpy(buf, bnxt_port_stats_arr[i].string);
273 				buf += ETH_GSTRING_LEN;
274 			}
275 		}
276 		break;
277 	default:
278 		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
279 			   stringset);
280 		break;
281 	}
282 }
283 
284 static void bnxt_get_ringparam(struct net_device *dev,
285 			       struct ethtool_ringparam *ering)
286 {
287 	struct bnxt *bp = netdev_priv(dev);
288 
289 	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
290 	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
291 	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
292 
293 	ering->rx_pending = bp->rx_ring_size;
294 	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
295 	ering->tx_pending = bp->tx_ring_size;
296 }
297 
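/* Applied on "ethtool -G", e.g.
 *   ethtool -G <iface> rx 2047 tx 2047
 * (values illustrative).  The tx ring size must be larger than
 * MAX_SKB_FRAGS, presumably so a maximally fragmented skb always fits.
 */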
298 static int bnxt_set_ringparam(struct net_device *dev,
299 			      struct ethtool_ringparam *ering)
300 {
301 	struct bnxt *bp = netdev_priv(dev);
302 
303 	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
304 	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
305 	    (ering->tx_pending <= MAX_SKB_FRAGS))
306 		return -EINVAL;
307 
308 	if (netif_running(dev))
309 		bnxt_close_nic(bp, false, false);
310 
311 	bp->rx_ring_size = ering->rx_pending;
312 	bp->tx_ring_size = ering->tx_pending;
313 	bnxt_set_ring_params(bp);
314 
315 	if (netif_running(dev))
316 		return bnxt_open_nic(bp, false, false);
317 
318 	return 0;
319 }
320 
321 static void bnxt_get_channels(struct net_device *dev,
322 			      struct ethtool_channels *channel)
323 {
324 	struct bnxt *bp = netdev_priv(dev);
325 	int max_rx_rings, max_tx_rings, tcs;
326 
327 	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
328 	channel->max_combined = max_rx_rings;
329 
330 	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
331 		max_rx_rings = 0;
332 		max_tx_rings = 0;
333 	}
334 
335 	tcs = netdev_get_num_tc(dev);
336 	if (tcs > 1)
337 		max_tx_rings /= tcs;
338 
339 	channel->max_rx = max_rx_rings;
340 	channel->max_tx = max_tx_rings;
341 	channel->max_other = 0;
342 	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
343 		channel->combined_count = bp->rx_nr_rings;
344 	} else {
345 		channel->rx_count = bp->rx_nr_rings;
346 		channel->tx_count = bp->tx_nr_rings_per_tc;
347 	}
348 }
349 
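/* "ethtool -L <iface> combined N" selects shared completion rings
 * (BNXT_FLAG_SHARED_RINGS), while "ethtool -L <iface> rx N tx M" requests
 * dedicated rx and tx rings; mixing combined with rx/tx counts is rejected.
 * With multiple traffic classes the tx count is per TC.
 */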
350 static int bnxt_set_channels(struct net_device *dev,
351 			     struct ethtool_channels *channel)
352 {
353 	struct bnxt *bp = netdev_priv(dev);
354 	int max_rx_rings, max_tx_rings, tcs;
355 	int rc = 0;
356 	bool sh = false;
357 
358 	if (channel->other_count)
359 		return -EINVAL;
360 
361 	if (!channel->combined_count &&
362 	    (!channel->rx_count || !channel->tx_count))
363 		return -EINVAL;
364 
365 	if (channel->combined_count &&
366 	    (channel->rx_count || channel->tx_count))
367 		return -EINVAL;
368 
369 	if (channel->combined_count)
370 		sh = true;
371 
372 	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
373 
374 	tcs = netdev_get_num_tc(dev);
375 	if (tcs > 1)
376 		max_tx_rings /= tcs;
377 
378 	if (sh && (channel->combined_count > max_rx_rings ||
379 		   channel->combined_count > max_tx_rings))
380 		return -ENOMEM;
381 
382 	if (!sh && (channel->rx_count > max_rx_rings ||
383 		    channel->tx_count > max_tx_rings))
384 		return -ENOMEM;
385 
386 	if (netif_running(dev)) {
387 		if (BNXT_PF(bp)) {
388 			/* TODO CHIMP_FW: Send message to all VFs
389 			 * before PF unload
390 			 */
391 		}
392 		rc = bnxt_close_nic(bp, true, false);
393 		if (rc) {
394 			netdev_err(bp->dev, "Set channel failure, rc: %d\n",
395 				   rc);
396 			return rc;
397 		}
398 	}
399 
400 	if (sh) {
401 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
402 		bp->rx_nr_rings = channel->combined_count;
403 		bp->tx_nr_rings_per_tc = channel->combined_count;
404 	} else {
405 		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
406 		bp->rx_nr_rings = channel->rx_count;
407 		bp->tx_nr_rings_per_tc = channel->tx_count;
408 	}
409 
410 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
411 	if (tcs > 1)
412 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
413 
414 	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
415 			       bp->tx_nr_rings + bp->rx_nr_rings;
416 
417 	bp->num_stat_ctxs = bp->cp_nr_rings;
418 
419 	/* After changing number of rx channels, update NTUPLE feature. */
420 	netdev_update_features(dev);
421 	if (netif_running(dev)) {
422 		rc = bnxt_open_nic(bp, true, false);
423 		if ((!rc) && BNXT_PF(bp)) {
424 			/* TODO CHIMP_FW: Send message to all VFs
425 			 * to re-enable
426 			 */
427 		}
428 	}
429 
430 	return rc;
431 }
432 
433 #ifdef CONFIG_RFS_ACCEL
434 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
435 			    u32 *rule_locs)
436 {
437 	int i, j = 0;
438 
439 	cmd->data = bp->ntp_fltr_count;
440 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
441 		struct hlist_head *head;
442 		struct bnxt_ntuple_filter *fltr;
443 
444 		head = &bp->ntp_fltr_hash_tbl[i];
445 		rcu_read_lock();
446 		hlist_for_each_entry_rcu(fltr, head, hash) {
447 			if (j == cmd->rule_cnt)
448 				break;
449 			rule_locs[j++] = fltr->sw_id;
450 		}
451 		rcu_read_unlock();
452 		if (j == cmd->rule_cnt)
453 			break;
454 	}
455 	cmd->rule_cnt = j;
456 	return 0;
457 }
458 
459 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
460 {
461 	struct ethtool_rx_flow_spec *fs =
462 		(struct ethtool_rx_flow_spec *)&cmd->fs;
463 	struct bnxt_ntuple_filter *fltr;
464 	struct flow_keys *fkeys;
465 	int i, rc = -EINVAL;
466 
467 	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
468 		return rc;
469 
470 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
471 		struct hlist_head *head;
472 
473 		head = &bp->ntp_fltr_hash_tbl[i];
474 		rcu_read_lock();
475 		hlist_for_each_entry_rcu(fltr, head, hash) {
476 			if (fltr->sw_id == fs->location)
477 				goto fltr_found;
478 		}
479 		rcu_read_unlock();
480 	}
481 	return rc;
482 
483 fltr_found:
484 	fkeys = &fltr->fkeys;
485 	if (fkeys->basic.ip_proto == IPPROTO_TCP)
486 		fs->flow_type = TCP_V4_FLOW;
487 	else if (fkeys->basic.ip_proto == IPPROTO_UDP)
488 		fs->flow_type = UDP_V4_FLOW;
489 	else
490 		goto fltr_err;
491 
492 	fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
493 	fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
494 
495 	fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
496 	fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
497 
498 	fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
499 	fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
500 
501 	fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
502 	fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
503 
504 	fs->ring_cookie = fltr->rxq;
505 	rc = 0;
506 
507 fltr_err:
508 	rcu_read_unlock();
509 
510 	return rc;
511 }
512 
513 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
514 			  u32 *rule_locs)
515 {
516 	struct bnxt *bp = netdev_priv(dev);
517 	int rc = 0;
518 
519 	switch (cmd->cmd) {
520 	case ETHTOOL_GRXRINGS:
521 		cmd->data = bp->rx_nr_rings;
522 		break;
523 
524 	case ETHTOOL_GRXCLSRLCNT:
525 		cmd->rule_cnt = bp->ntp_fltr_count;
526 		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
527 		break;
528 
529 	case ETHTOOL_GRXCLSRLALL:
530 		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
531 		break;
532 
533 	case ETHTOOL_GRXCLSRULE:
534 		rc = bnxt_grxclsrule(bp, cmd);
535 		break;
536 
537 	default:
538 		rc = -EOPNOTSUPP;
539 		break;
540 	}
541 
542 	return rc;
543 }
544 #endif
545 
546 static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
547 {
548 	return HW_HASH_INDEX_SIZE;
549 }
550 
551 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
552 {
553 	return HW_HASH_KEY_SIZE;
554 }
555 
556 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
557 			 u8 *hfunc)
558 {
559 	struct bnxt *bp = netdev_priv(dev);
560 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
561 	int i = 0;
562 
563 	if (hfunc)
564 		*hfunc = ETH_RSS_HASH_TOP;
565 
566 	if (indir)
567 		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
568 			indir[i] = le16_to_cpu(vnic->rss_table[i]);
569 
570 	if (key)
571 		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
572 
573 	return 0;
574 }
575 
576 static void bnxt_get_drvinfo(struct net_device *dev,
577 			     struct ethtool_drvinfo *info)
578 {
579 	struct bnxt *bp = netdev_priv(dev);
580 	char *pkglog;
581 	char *pkgver = NULL;
582 
583 	pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
584 	if (pkglog)
585 		pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
586 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
587 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
588 	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
589 		snprintf(info->fw_version, sizeof(info->fw_version) - 1,
590 			 "%s pkg %s", bp->fw_ver_str, pkgver);
591 	else
592 		strlcpy(info->fw_version, bp->fw_ver_str,
593 			sizeof(info->fw_version));
594 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
595 	info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
596 	info->testinfo_len = BNXT_NUM_TESTS(bp);
597 	/* TODO CHIMP_FW: eeprom dump details */
598 	info->eedump_len = 0;
599 	/* TODO CHIMP FW: reg dump details */
600 	info->regdump_len = 0;
601 	kfree(pkglog);
602 }
603 
604 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
605 {
606 	u32 speed_mask = 0;
607 
608 	/* TODO: support 25GB, 40GB, 50GB with different cable type */
609 	/* set the advertised speeds */
610 	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
611 		speed_mask |= ADVERTISED_100baseT_Full;
612 	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
613 		speed_mask |= ADVERTISED_1000baseT_Full;
614 	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
615 		speed_mask |= ADVERTISED_2500baseX_Full;
616 	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
617 		speed_mask |= ADVERTISED_10000baseT_Full;
618 	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
619 		speed_mask |= ADVERTISED_40000baseCR4_Full;
620 
621 	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
622 		speed_mask |= ADVERTISED_Pause;
623 	else if (fw_pause & BNXT_LINK_PAUSE_TX)
624 		speed_mask |= ADVERTISED_Asym_Pause;
625 	else if (fw_pause & BNXT_LINK_PAUSE_RX)
626 		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
627 
628 	return speed_mask;
629 }
630 
631 static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
632 {
633 	u16 fw_speeds = link_info->auto_link_speeds;
634 	u8 fw_pause = 0;
635 
636 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
637 		fw_pause = link_info->auto_pause_setting;
638 
639 	return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
640 }
641 
642 static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
643 {
644 	u16 fw_speeds = link_info->lp_auto_link_speeds;
645 	u8 fw_pause = 0;
646 
647 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
648 		fw_pause = link_info->lp_pause;
649 
650 	return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
651 }
652 
653 static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
654 {
655 	u16 fw_speeds = link_info->support_speeds;
656 	u32 supported;
657 
658 	supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
659 	return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
660 }
661 
662 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
663 {
664 	switch (fw_link_speed) {
665 	case BNXT_LINK_SPEED_100MB:
666 		return SPEED_100;
667 	case BNXT_LINK_SPEED_1GB:
668 		return SPEED_1000;
669 	case BNXT_LINK_SPEED_2_5GB:
670 		return SPEED_2500;
671 	case BNXT_LINK_SPEED_10GB:
672 		return SPEED_10000;
673 	case BNXT_LINK_SPEED_20GB:
674 		return SPEED_20000;
675 	case BNXT_LINK_SPEED_25GB:
676 		return SPEED_25000;
677 	case BNXT_LINK_SPEED_40GB:
678 		return SPEED_40000;
679 	case BNXT_LINK_SPEED_50GB:
680 		return SPEED_50000;
681 	default:
682 		return SPEED_UNKNOWN;
683 	}
684 }
685 
686 static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
687 {
688 	struct bnxt *bp = netdev_priv(dev);
689 	struct bnxt_link_info *link_info = &bp->link_info;
690 	u16 ethtool_speed;
691 
692 	cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
693 
694 	if (link_info->auto_link_speeds)
695 		cmd->supported |= SUPPORTED_Autoneg;
696 
697 	if (link_info->autoneg) {
698 		cmd->advertising =
699 			bnxt_fw_to_ethtool_advertised_spds(link_info);
700 		cmd->advertising |= ADVERTISED_Autoneg;
701 		cmd->autoneg = AUTONEG_ENABLE;
702 		if (link_info->phy_link_status == BNXT_LINK_LINK)
703 			cmd->lp_advertising =
704 				bnxt_fw_to_ethtool_lp_adv(link_info);
705 		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
706 		if (!netif_carrier_ok(dev))
707 			cmd->duplex = DUPLEX_UNKNOWN;
708 		else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
709 			cmd->duplex = DUPLEX_FULL;
710 		else
711 			cmd->duplex = DUPLEX_HALF;
712 	} else {
713 		cmd->autoneg = AUTONEG_DISABLE;
714 		cmd->advertising = 0;
715 		ethtool_speed =
716 			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
717 		cmd->duplex = DUPLEX_HALF;
718 		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
719 			cmd->duplex = DUPLEX_FULL;
720 	}
721 	ethtool_cmd_speed_set(cmd, ethtool_speed);
722 
723 	cmd->port = PORT_NONE;
724 	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
725 		cmd->port = PORT_TP;
726 		cmd->supported |= SUPPORTED_TP;
727 		cmd->advertising |= ADVERTISED_TP;
728 	} else {
729 		cmd->supported |= SUPPORTED_FIBRE;
730 		cmd->advertising |= ADVERTISED_FIBRE;
731 
732 		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
733 			cmd->port = PORT_DA;
734 		else if (link_info->media_type ==
735 			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
736 			cmd->port = PORT_FIBRE;
737 	}
738 
739 	if (link_info->transceiver ==
740 	    PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
741 		cmd->transceiver = XCVR_INTERNAL;
742 	else
743 		cmd->transceiver = XCVR_EXTERNAL;
744 	cmd->phy_address = link_info->phy_addr;
745 
746 	return 0;
747 }
748 
749 static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
750 {
751 	struct bnxt *bp = netdev_priv(dev);
752 	struct bnxt_link_info *link_info = &bp->link_info;
753 	u16 support_spds = link_info->support_speeds;
754 	u32 fw_speed = 0;
755 
756 	switch (ethtool_speed) {
757 	case SPEED_100:
758 		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
759 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
760 		break;
761 	case SPEED_1000:
762 		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
763 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
764 		break;
765 	case SPEED_2500:
766 		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
767 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
768 		break;
769 	case SPEED_10000:
770 		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
771 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
772 		break;
773 	case SPEED_20000:
774 		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
775 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
776 		break;
777 	case SPEED_25000:
778 		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
779 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
780 		break;
781 	case SPEED_40000:
782 		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
783 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
784 		break;
785 	case SPEED_50000:
786 		if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
787 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
788 		break;
789 	default:
790 		netdev_err(dev, "unsupported speed!\n");
791 		break;
792 	}
793 	return fw_speed;
794 }
795 
796 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
797 {
798 	u16 fw_speed_mask = 0;
799 
800 	/* only support autoneg at speeds 100, 1000, 10000, and 40000 */
801 	if (advertising & (ADVERTISED_100baseT_Full |
802 			   ADVERTISED_100baseT_Half)) {
803 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
804 	}
805 	if (advertising & (ADVERTISED_1000baseT_Full |
806 			   ADVERTISED_1000baseT_Half)) {
807 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
808 	}
809 	if (advertising & ADVERTISED_10000baseT_Full)
810 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
811 
812 	if (advertising & ADVERTISED_40000baseCR4_Full)
813 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
814 
815 	return fw_speed_mask;
816 }
817 
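/* Handles "ethtool -s", e.g.
 *   ethtool -s <iface> autoneg on advertise 0x1020
 * or
 *   ethtool -s <iface> speed 10000 duplex full autoneg off
 * (values illustrative).  Forced half duplex and forced speeds on
 * 10GBase-T media are rejected below.
 */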
818 static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
819 {
820 	int rc = 0;
821 	struct bnxt *bp = netdev_priv(dev);
822 	struct bnxt_link_info *link_info = &bp->link_info;
823 	u32 speed, fw_advertising = 0;
824 	bool set_pause = false;
825 
826 	if (BNXT_VF(bp))
827 		return rc;
828 
829 	if (cmd->autoneg == AUTONEG_ENABLE) {
830 		u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info);
831 
832 		if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg |
833 					 ADVERTISED_TP | ADVERTISED_FIBRE)) {
834 			netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
835 				   cmd->advertising);
836 			rc = -EINVAL;
837 			goto set_setting_exit;
838 		}
839 		fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
840 		if (fw_advertising & ~link_info->support_speeds) {
841 			netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
842 				   cmd->advertising);
843 			rc = -EINVAL;
844 			goto set_setting_exit;
845 		}
846 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
847 		if (!fw_advertising)
848 			link_info->advertising = link_info->support_speeds;
849 		else
850 			link_info->advertising = fw_advertising;
851 		/* Any change to autoneg causes a link change, so the driver
852 		 * should restore the original pause setting under autoneg
853 		 */
854 		set_pause = true;
855 	} else {
856 		u16 fw_speed;
857 		u8 phy_type = link_info->phy_type;
858 
859 		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
860 		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
861 		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
862 			netdev_err(dev, "10GBase-T devices must autoneg\n");
863 			rc = -EINVAL;
864 			goto set_setting_exit;
865 		}
866 		/* TODO: currently don't support half duplex */
867 		if (cmd->duplex == DUPLEX_HALF) {
868 			netdev_err(dev, "HALF DUPLEX is not supported!\n");
869 			rc = -EINVAL;
870 			goto set_setting_exit;
871 		}
872 		/* If we receive a request for an unknown duplex, assume full duplex */
873 		if (cmd->duplex == DUPLEX_UNKNOWN)
874 			cmd->duplex = DUPLEX_FULL;
875 		speed = ethtool_cmd_speed(cmd);
876 		fw_speed = bnxt_get_fw_speed(dev, speed);
877 		if (!fw_speed) {
878 			rc = -EINVAL;
879 			goto set_setting_exit;
880 		}
881 		link_info->req_link_speed = fw_speed;
882 		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
883 		link_info->autoneg = 0;
884 		link_info->advertising = 0;
885 	}
886 
887 	if (netif_running(dev))
888 		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
889 
890 set_setting_exit:
891 	return rc;
892 }
893 
894 static void bnxt_get_pauseparam(struct net_device *dev,
895 				struct ethtool_pauseparam *epause)
896 {
897 	struct bnxt *bp = netdev_priv(dev);
898 	struct bnxt_link_info *link_info = &bp->link_info;
899 
900 	if (BNXT_VF(bp))
901 		return;
902 	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
903 	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
904 	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
905 }
906 
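/* "ethtool -A <iface> autoneg on|off rx on|off tx on|off".  Pause
 * autonegotiation is only accepted when speed autoneg is already enabled
 * (see the check below).
 */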
907 static int bnxt_set_pauseparam(struct net_device *dev,
908 			       struct ethtool_pauseparam *epause)
909 {
910 	int rc = 0;
911 	struct bnxt *bp = netdev_priv(dev);
912 	struct bnxt_link_info *link_info = &bp->link_info;
913 
914 	if (BNXT_VF(bp))
915 		return rc;
916 
917 	if (epause->autoneg) {
918 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
919 			return -EINVAL;
920 
921 		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
922 		if (bp->hwrm_spec_code >= 0x10201)
923 			link_info->req_flow_ctrl =
924 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
925 	} else {
926 		/* When transitioning from autoneg pause to forced pause,
927 		 * force a link change
928 		 */
929 		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
930 			link_info->force_link_chng = true;
931 		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
932 		link_info->req_flow_ctrl = 0;
933 	}
934 	if (epause->rx_pause)
935 		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
936 
937 	if (epause->tx_pause)
938 		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
939 
940 	if (netif_running(dev))
941 		rc = bnxt_hwrm_set_pause(bp);
942 	return rc;
943 }
944 
945 static u32 bnxt_get_link(struct net_device *dev)
946 {
947 	struct bnxt *bp = netdev_priv(dev);
948 
949 	/* TODO: handle MF, VF, driver close case */
950 	return bp->link_info.link_up;
951 }
952 
953 static int bnxt_flash_nvram(struct net_device *dev,
954 			    u16 dir_type,
955 			    u16 dir_ordinal,
956 			    u16 dir_ext,
957 			    u16 dir_attr,
958 			    const u8 *data,
959 			    size_t data_len)
960 {
961 	struct bnxt *bp = netdev_priv(dev);
962 	int rc;
963 	struct hwrm_nvm_write_input req = {0};
964 	dma_addr_t dma_handle;
965 	u8 *kmem;
966 
967 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
968 
969 	req.dir_type = cpu_to_le16(dir_type);
970 	req.dir_ordinal = cpu_to_le16(dir_ordinal);
971 	req.dir_ext = cpu_to_le16(dir_ext);
972 	req.dir_attr = cpu_to_le16(dir_attr);
973 	req.dir_data_length = cpu_to_le32(data_len);
974 
975 	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
976 				  GFP_KERNEL);
977 	if (!kmem) {
978 		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
979 			   (unsigned)data_len);
980 		return -ENOMEM;
981 	}
982 	memcpy(kmem, data, data_len);
983 	req.host_src_addr = cpu_to_le64(dma_handle);
984 
985 	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
986 	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
987 
988 	return rc;
989 }
990 
991 static int bnxt_firmware_reset(struct net_device *dev,
992 			       u16 dir_type)
993 {
994 	struct bnxt *bp = netdev_priv(dev);
995 	struct hwrm_fw_reset_input req = {0};
996 
997 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
998 
999 	/* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
1000 	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
1001 	/*       (e.g. when firmware isn't already running) */
1002 	switch (dir_type) {
1003 	case BNX_DIR_TYPE_CHIMP_PATCH:
1004 	case BNX_DIR_TYPE_BOOTCODE:
1005 	case BNX_DIR_TYPE_BOOTCODE_2:
1006 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
1007 		/* Self-reset ChiMP upon next PCIe reset: */
1008 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
1009 		break;
1010 	case BNX_DIR_TYPE_APE_FW:
1011 	case BNX_DIR_TYPE_APE_PATCH:
1012 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
1013 		break;
1014 	case BNX_DIR_TYPE_KONG_FW:
1015 	case BNX_DIR_TYPE_KONG_PATCH:
1016 		req.embedded_proc_type =
1017 			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
1018 		break;
1019 	case BNX_DIR_TYPE_BONO_FW:
1020 	case BNX_DIR_TYPE_BONO_PATCH:
1021 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
1022 		break;
1023 	default:
1024 		return -EINVAL;
1025 	}
1026 
1027 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1028 }
1029 
1030 static int bnxt_flash_firmware(struct net_device *dev,
1031 			       u16 dir_type,
1032 			       const u8 *fw_data,
1033 			       size_t fw_size)
1034 {
1035 	int	rc = 0;
1036 	u16	code_type;
1037 	u32	stored_crc;
1038 	u32	calculated_crc;
1039 	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
1040 
1041 	switch (dir_type) {
1042 	case BNX_DIR_TYPE_BOOTCODE:
1043 	case BNX_DIR_TYPE_BOOTCODE_2:
1044 		code_type = CODE_BOOT;
1045 		break;
1046 	case BNX_DIR_TYPE_APE_FW:
1047 		code_type = CODE_MCTP_PASSTHRU;
1048 		break;
1049 	default:
1050 		netdev_err(dev, "Unsupported directory entry type: %u\n",
1051 			   dir_type);
1052 		return -EINVAL;
1053 	}
1054 	if (fw_size < sizeof(struct bnxt_fw_header)) {
1055 		netdev_err(dev, "Invalid firmware file size: %u\n",
1056 			   (unsigned int)fw_size);
1057 		return -EINVAL;
1058 	}
1059 	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
1060 		netdev_err(dev, "Invalid firmware signature: %08X\n",
1061 			   le32_to_cpu(header->signature));
1062 		return -EINVAL;
1063 	}
1064 	if (header->code_type != code_type) {
1065 		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
1066 			   code_type, header->code_type);
1067 		return -EINVAL;
1068 	}
1069 	if (header->device != DEVICE_CUMULUS_FAMILY) {
1070 		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
1071 			   DEVICE_CUMULUS_FAMILY, header->device);
1072 		return -EINVAL;
1073 	}
1074 	/* Confirm the CRC32 checksum of the file: */
1075 	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
1076 					     sizeof(stored_crc)));
1077 	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
1078 	if (calculated_crc != stored_crc) {
1079 		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
1080 			   (unsigned long)stored_crc,
1081 			   (unsigned long)calculated_crc);
1082 		return -EINVAL;
1083 	}
1084 	/* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
1085 	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1086 			      0, 0, fw_data, fw_size);
1087 	if (rc == 0)	/* Firmware update successful */
1088 		rc = bnxt_firmware_reset(dev, dir_type);
1089 
1090 	return rc;
1091 }
1092 
1093 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
1094 {
1095 	switch (dir_type) {
1096 	case BNX_DIR_TYPE_CHIMP_PATCH:
1097 	case BNX_DIR_TYPE_BOOTCODE:
1098 	case BNX_DIR_TYPE_BOOTCODE_2:
1099 	case BNX_DIR_TYPE_APE_FW:
1100 	case BNX_DIR_TYPE_APE_PATCH:
1101 	case BNX_DIR_TYPE_KONG_FW:
1102 	case BNX_DIR_TYPE_KONG_PATCH:
1103 		return true;
1104 	}
1105 
1106 	return false;
1107 }
1108 
1109 static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
1110 {
1111 	switch (dir_type) {
1112 	case BNX_DIR_TYPE_AVS:
1113 	case BNX_DIR_TYPE_EXP_ROM_MBA:
1114 	case BNX_DIR_TYPE_PCIE:
1115 	case BNX_DIR_TYPE_TSCF_UCODE:
1116 	case BNX_DIR_TYPE_EXT_PHY:
1117 	case BNX_DIR_TYPE_CCM:
1118 	case BNX_DIR_TYPE_ISCSI_BOOT:
1119 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
1120 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
1121 		return true;
1122 	}
1123 
1124 	return false;
1125 }
1126 
1127 static bool bnxt_dir_type_is_executable(u16 dir_type)
1128 {
1129 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
1130 		bnxt_dir_type_is_unprotected_exec_format(dir_type);
1131 }
1132 
1133 static int bnxt_flash_firmware_from_file(struct net_device *dev,
1134 					 u16 dir_type,
1135 					 const char *filename)
1136 {
1137 	const struct firmware  *fw;
1138 	int			rc;
1139 
1140 	if (!bnxt_dir_type_is_executable(dir_type))
1141 		return -EINVAL;
1142 
1143 	rc = request_firmware(&fw, filename, &dev->dev);
1144 	if (rc != 0) {
1145 		netdev_err(dev, "Error %d requesting firmware file: %s\n",
1146 			   rc, filename);
1147 		return rc;
1148 	}
1149 	if (bnxt_dir_type_is_ape_bin_format(dir_type))
1150 		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
1151 	else
1152 		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1153 				      0, 0, fw->data, fw->size);
1154 	release_firmware(fw);
1155 	return rc;
1156 }
1157 
1158 static int bnxt_flash_package_from_file(struct net_device *dev,
1159 					char *filename)
1160 {
1161 	netdev_err(dev, "packages are not yet supported\n");
1162 	return -EINVAL;
1163 }
1164 
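/* Entry point for "ethtool -f", e.g.
 *   ethtool -f <iface> firmware.bin <region>
 * where <region> selects the NVM directory entry type (BNX_DIR_TYPE_*) to
 * program.  Flashing a whole package (region ETHTOOL_FLASH_ALL_REGIONS) is
 * not yet supported, as bnxt_flash_package_from_file() above reports.
 */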
1165 static int bnxt_flash_device(struct net_device *dev,
1166 			     struct ethtool_flash *flash)
1167 {
1168 	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
1169 		netdev_err(dev, "flashdev not supported from a virtual function\n");
1170 		return -EINVAL;
1171 	}
1172 
1173 	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
1174 		return bnxt_flash_package_from_file(dev, flash->data);
1175 
1176 	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
1177 }
1178 
1179 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
1180 {
1181 	struct bnxt *bp = netdev_priv(dev);
1182 	int rc;
1183 	struct hwrm_nvm_get_dir_info_input req = {0};
1184 	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
1185 
1186 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
1187 
1188 	mutex_lock(&bp->hwrm_cmd_lock);
1189 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1190 	if (!rc) {
1191 		*entries = le32_to_cpu(output->entries);
1192 		*length = le32_to_cpu(output->entry_length);
1193 	}
1194 	mutex_unlock(&bp->hwrm_cmd_lock);
1195 	return rc;
1196 }
1197 
1198 static int bnxt_get_eeprom_len(struct net_device *dev)
1199 {
1200 	/* The -1 return value allows the entire 32-bit range of offsets to be
1201 	 * passed via the ethtool command-line utility.
1202 	 */
1203 	return -1;
1204 }
1205 
1206 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
1207 {
1208 	struct bnxt *bp = netdev_priv(dev);
1209 	int rc;
1210 	u32 dir_entries;
1211 	u32 entry_length;
1212 	u8 *buf;
1213 	size_t buflen;
1214 	dma_addr_t dma_handle;
1215 	struct hwrm_nvm_get_dir_entries_input req = {0};
1216 
1217 	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
1218 	if (rc != 0)
1219 		return rc;
1220 
1221 	/* Insert 2 bytes of directory info (count and size of entries) */
1222 	if (len < 2)
1223 		return -EINVAL;
1224 
1225 	*data++ = dir_entries;
1226 	*data++ = entry_length;
1227 	len -= 2;
1228 	memset(data, 0xff, len);
1229 
1230 	buflen = dir_entries * entry_length;
1231 	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
1232 				 GFP_KERNEL);
1233 	if (!buf) {
1234 		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1235 			   (unsigned)buflen);
1236 		return -ENOMEM;
1237 	}
1238 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
1239 	req.host_dest_addr = cpu_to_le64(dma_handle);
1240 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1241 	if (rc == 0)
1242 		memcpy(data, buf, len > buflen ? buflen : len);
1243 	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
1244 	return rc;
1245 }
1246 
1247 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
1248 			       u32 length, u8 *data)
1249 {
1250 	struct bnxt *bp = netdev_priv(dev);
1251 	int rc;
1252 	u8 *buf;
1253 	dma_addr_t dma_handle;
1254 	struct hwrm_nvm_read_input req = {0};
1255 
1256 	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
1257 				 GFP_KERNEL);
1258 	if (!buf) {
1259 		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1260 			   (unsigned)length);
1261 		return -ENOMEM;
1262 	}
1263 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
1264 	req.host_dest_addr = cpu_to_le64(dma_handle);
1265 	req.dir_idx = cpu_to_le16(index);
1266 	req.offset = cpu_to_le32(offset);
1267 	req.len = cpu_to_le32(length);
1268 
1269 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1270 	if (rc == 0)
1271 		memcpy(data, buf, length);
1272 	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
1273 	return rc;
1274 }
1275 
1276 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1277 				u16 ext, u16 *index, u32 *item_length,
1278 				u32 *data_length)
1279 {
1280 	struct bnxt *bp = netdev_priv(dev);
1281 	int rc;
1282 	struct hwrm_nvm_find_dir_entry_input req = {0};
1283 	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
1284 
1285 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
1286 	req.enables = 0;
1287 	req.dir_idx = 0;
1288 	req.dir_type = cpu_to_le16(type);
1289 	req.dir_ordinal = cpu_to_le16(ordinal);
1290 	req.dir_ext = cpu_to_le16(ext);
1291 	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
1292 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1293 	if (rc == 0) {
1294 		if (index)
1295 			*index = le16_to_cpu(output->dir_idx);
1296 		if (item_length)
1297 			*item_length = le32_to_cpu(output->dir_item_length);
1298 		if (data_length)
1299 			*data_length = le32_to_cpu(output->dir_data_length);
1300 	}
1301 	return rc;
1302 }
1303 
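/* Walk the tab-separated package log in place (fields and lines are
 * NUL-terminated as they are scanned) and return a pointer to field
 * number 'desired_field' of the last record, or NULL if the log is empty
 * or the field is missing.
 */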
1304 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
1305 {
1306 	char	*retval = NULL;
1307 	char	*p;
1308 	char	*value;
1309 	int	field = 0;
1310 
1311 	if (datalen < 1)
1312 		return NULL;
1313 	/* null-terminate the log data (removing last '\n'): */
1314 	data[datalen - 1] = 0;
1315 	for (p = data; *p != 0; p++) {
1316 		field = 0;
1317 		retval = NULL;
1318 		while (*p != 0 && *p != '\n') {
1319 			value = p;
1320 			while (*p != 0 && *p != '\t' && *p != '\n')
1321 				p++;
1322 			if (field == desired_field)
1323 				retval = value;
1324 			if (*p != '\t')
1325 				break;
1326 			*p = 0;
1327 			field++;
1328 			p++;
1329 		}
1330 		if (*p == 0)
1331 			break;
1332 		*p = 0;
1333 	}
1334 	return retval;
1335 }
1336 
1337 static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
1338 {
1339 	u16 index = 0;
1340 	u32 datalen;
1341 
1342 	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
1343 				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1344 				 &index, NULL, &datalen) != 0)
1345 		return NULL;
1346 
1347 	memset(buf, 0, buflen);
1348 	if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
1349 		return NULL;
1350 
1351 	return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
1352 		datalen);
1353 }
1354 
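/* "ethtool -e" interface to the NVM: offset 0 dumps the directory itself,
 * otherwise bits 31:24 of the offset select the directory entry (1-based)
 * and bits 23:0 the byte offset within it, e.g.
 *   ethtool -e <iface> offset 0x01000000 length 64
 * reads the first 64 bytes of directory entry 0 (values illustrative).
 */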
1355 static int bnxt_get_eeprom(struct net_device *dev,
1356 			   struct ethtool_eeprom *eeprom,
1357 			   u8 *data)
1358 {
1359 	u32 index;
1360 	u32 offset;
1361 
1362 	if (eeprom->offset == 0) /* special offset value to get directory */
1363 		return bnxt_get_nvram_directory(dev, eeprom->len, data);
1364 
1365 	index = eeprom->offset >> 24;
1366 	offset = eeprom->offset & 0xffffff;
1367 
1368 	if (index == 0) {
1369 		netdev_err(dev, "unsupported index value: %d\n", index);
1370 		return -EINVAL;
1371 	}
1372 
1373 	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
1374 }
1375 
1376 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
1377 {
1378 	struct bnxt *bp = netdev_priv(dev);
1379 	struct hwrm_nvm_erase_dir_entry_input req = {0};
1380 
1381 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
1382 	req.dir_idx = cpu_to_le16(index);
1383 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1384 }
1385 
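/* "ethtool -E" interface to the NVM: magic 0xffff0e<idx> together with
 * offset == ~magic erases directory entry <idx> - 1.  Any other magic is
 * taken as dir_type << 16 | dir_ext, with the ordinal and attributes
 * packed into the offset field, and creates or rewrites that item
 * (executable types are refused here).
 */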
1386 static int bnxt_set_eeprom(struct net_device *dev,
1387 			   struct ethtool_eeprom *eeprom,
1388 			   u8 *data)
1389 {
1390 	struct bnxt *bp = netdev_priv(dev);
1391 	u8 index, dir_op;
1392 	u16 type, ext, ordinal, attr;
1393 
1394 	if (!BNXT_PF(bp)) {
1395 		netdev_err(dev, "NVM write not supported from a virtual function\n");
1396 		return -EINVAL;
1397 	}
1398 
1399 	type = eeprom->magic >> 16;
1400 
1401 	if (type == 0xffff) { /* special value for directory operations */
1402 		index = eeprom->magic & 0xff;
1403 		dir_op = eeprom->magic >> 8;
1404 		if (index == 0)
1405 			return -EINVAL;
1406 		switch (dir_op) {
1407 		case 0x0e: /* erase */
1408 			if (eeprom->offset != ~eeprom->magic)
1409 				return -EINVAL;
1410 			return bnxt_erase_nvram_directory(dev, index - 1);
1411 		default:
1412 			return -EINVAL;
1413 		}
1414 	}
1415 
1416 	/* Create or re-write an NVM item: */
1417 	if (bnxt_dir_type_is_executable(type))
1418 		return -EINVAL;
1419 	ext = eeprom->magic & 0xffff;
1420 	ordinal = eeprom->offset >> 16;
1421 	attr = eeprom->offset & 0xffff;
1422 
1423 	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
1424 				eeprom->len);
1425 }
1426 
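/* Handles "ethtool --set-eee", e.g.
 *   ethtool --set-eee <iface> eee on tx-lpi on tx-timer 5000
 * (values illustrative).  EEE can only be enabled while speed autoneg is
 * on, and the advertised EEE speeds must be a subset of the autoneg
 * advertisement.
 */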
1427 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1428 {
1429 	struct bnxt *bp = netdev_priv(dev);
1430 	struct ethtool_eee *eee = &bp->eee;
1431 	struct bnxt_link_info *link_info = &bp->link_info;
1432 	u32 advertising =
1433 		 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
1434 	int rc = 0;
1435 
1436 	if (BNXT_VF(bp))
1437 		return 0;
1438 
1439 	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1440 		return -EOPNOTSUPP;
1441 
1442 	if (!edata->eee_enabled)
1443 		goto eee_ok;
1444 
1445 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
1446 		netdev_warn(dev, "EEE requires autoneg\n");
1447 		return -EINVAL;
1448 	}
1449 	if (edata->tx_lpi_enabled) {
1450 		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
1451 				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
1452 			netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n",
1453 				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
1454 			return -EINVAL;
1455 		} else if (!bp->lpi_tmr_hi) {
1456 			edata->tx_lpi_timer = eee->tx_lpi_timer;
1457 		}
1458 	}
1459 	if (!edata->advertised) {
1460 		edata->advertised = advertising & eee->supported;
1461 	} else if (edata->advertised & ~advertising) {
1462 		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
1463 			    edata->advertised, advertising);
1464 		return -EINVAL;
1465 	}
1466 
1467 	eee->advertised = edata->advertised;
1468 	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
1469 	eee->tx_lpi_timer = edata->tx_lpi_timer;
1470 eee_ok:
1471 	eee->eee_enabled = edata->eee_enabled;
1472 
1473 	if (netif_running(dev))
1474 		rc = bnxt_hwrm_set_link_setting(bp, false, true);
1475 
1476 	return rc;
1477 }
1478 
1479 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1480 {
1481 	struct bnxt *bp = netdev_priv(dev);
1482 
1483 	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1484 		return -EOPNOTSUPP;
1485 
1486 	*edata = bp->eee;
1487 	if (!bp->eee.eee_enabled) {
1488 		/* Preserve tx_lpi_timer so that the last value will be used
1489 		 * by default when it is re-enabled.
1490 		 */
1491 		edata->advertised = 0;
1492 		edata->tx_lpi_enabled = 0;
1493 	}
1494 
1495 	if (!bp->eee.eee_active)
1496 		edata->lp_advertised = 0;
1497 
1498 	return 0;
1499 }
1500 
1501 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
1502 					    u16 page_number, u16 start_addr,
1503 					    u16 data_length, u8 *buf)
1504 {
1505 	struct hwrm_port_phy_i2c_read_input req = {0};
1506 	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
1507 	int rc, byte_offset = 0;
1508 
1509 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
1510 	req.i2c_slave_addr = i2c_addr;
1511 	req.page_number = cpu_to_le16(page_number);
1512 	req.port_id = cpu_to_le16(bp->pf.port_id);
1513 	do {
1514 		u16 xfer_size;
1515 
1516 		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
1517 		data_length -= xfer_size;
1518 		req.page_offset = cpu_to_le16(start_addr + byte_offset);
1519 		req.data_length = xfer_size;
1520 		req.enables = cpu_to_le32(start_addr + byte_offset ?
1521 				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
1522 		mutex_lock(&bp->hwrm_cmd_lock);
1523 		rc = _hwrm_send_message(bp, &req, sizeof(req),
1524 					HWRM_CMD_TIMEOUT);
1525 		if (!rc)
1526 			memcpy(buf + byte_offset, output->data, xfer_size);
1527 		mutex_unlock(&bp->hwrm_cmd_lock);
1528 		byte_offset += xfer_size;
1529 	} while (!rc && data_length > 0);
1530 
1531 	return rc;
1532 }
1533 
1534 static int bnxt_get_module_info(struct net_device *dev,
1535 				struct ethtool_modinfo *modinfo)
1536 {
1537 	struct bnxt *bp = netdev_priv(dev);
1538 	struct hwrm_port_phy_i2c_read_input req = {0};
1539 	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
1540 	int rc;
1541 
1542 	/* No point in going further if phy status indicates
1543 	 * module is not inserted or if it is powered down or
1544 	 * if it is of type 10GBase-T
1545 	 */
1546 	if (bp->link_info.module_status >
1547 		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
1548 		return -EOPNOTSUPP;
1549 
1550 	/* This feature is not supported in older firmware versions */
1551 	if (bp->hwrm_spec_code < 0x10202)
1552 		return -EOPNOTSUPP;
1553 
1554 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
1555 	req.i2c_slave_addr = I2C_DEV_ADDR_A0;
1556 	req.page_number = 0;
1557 	req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
1558 	req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
1559 	req.port_id = cpu_to_le16(bp->pf.port_id);
1560 	mutex_lock(&bp->hwrm_cmd_lock);
1561 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1562 	if (!rc) {
1563 		u32 module_id = le32_to_cpu(output->data[0]);
1564 
1565 		switch (module_id) {
1566 		case SFF_MODULE_ID_SFP:
1567 			modinfo->type = ETH_MODULE_SFF_8472;
1568 			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1569 			break;
1570 		case SFF_MODULE_ID_QSFP:
1571 		case SFF_MODULE_ID_QSFP_PLUS:
1572 			modinfo->type = ETH_MODULE_SFF_8436;
1573 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
1574 			break;
1575 		case SFF_MODULE_ID_QSFP28:
1576 			modinfo->type = ETH_MODULE_SFF_8636;
1577 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1578 			break;
1579 		default:
1580 			rc = -EOPNOTSUPP;
1581 			break;
1582 		}
1583 	}
1584 	mutex_unlock(&bp->hwrm_cmd_lock);
1585 	return rc;
1586 }
1587 
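/* Backs "ethtool -m <iface>" module EEPROM dumps.  Per SFF-8472, the first
 * ETH_MODULE_SFF_8436_LEN (256) bytes come from I2C address A0h and any
 * remainder from the diagnostic area at A2h, so the request is split into
 * two reads below.
 */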
1588 static int bnxt_get_module_eeprom(struct net_device *dev,
1589 				  struct ethtool_eeprom *eeprom,
1590 				  u8 *data)
1591 {
1592 	struct bnxt *bp = netdev_priv(dev);
1593 	u16  start = eeprom->offset, length = eeprom->len;
1594 	int rc;
1595 
1596 	memset(data, 0, eeprom->len);
1597 
1598 	/* Read A0 portion of the EEPROM */
1599 	if (start < ETH_MODULE_SFF_8436_LEN) {
1600 		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
1601 			length = ETH_MODULE_SFF_8436_LEN - start;
1602 		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
1603 						      start, length, data);
1604 		if (rc)
1605 			return rc;
1606 		start += length;
1607 		data += length;
1608 		length = eeprom->len - length;
1609 	}
1610 
1611 	/* Read A2 portion of the EEPROM */
1612 	if (length) {
1613 		start -= ETH_MODULE_SFF_8436_LEN;
1614 		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
1615 						      start, length, data);
1616 	}
1617 	return rc;
1618 }
1619 
1620 const struct ethtool_ops bnxt_ethtool_ops = {
1621 	.get_settings		= bnxt_get_settings,
1622 	.set_settings		= bnxt_set_settings,
1623 	.get_pauseparam		= bnxt_get_pauseparam,
1624 	.set_pauseparam		= bnxt_set_pauseparam,
1625 	.get_drvinfo		= bnxt_get_drvinfo,
1626 	.get_coalesce		= bnxt_get_coalesce,
1627 	.set_coalesce		= bnxt_set_coalesce,
1628 	.get_msglevel		= bnxt_get_msglevel,
1629 	.set_msglevel		= bnxt_set_msglevel,
1630 	.get_sset_count		= bnxt_get_sset_count,
1631 	.get_strings		= bnxt_get_strings,
1632 	.get_ethtool_stats	= bnxt_get_ethtool_stats,
1633 	.set_ringparam		= bnxt_set_ringparam,
1634 	.get_ringparam		= bnxt_get_ringparam,
1635 	.get_channels		= bnxt_get_channels,
1636 	.set_channels		= bnxt_set_channels,
1637 #ifdef CONFIG_RFS_ACCEL
1638 	.get_rxnfc		= bnxt_get_rxnfc,
1639 #endif
1640 	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
1641 	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
1642 	.get_rxfh               = bnxt_get_rxfh,
1643 	.flash_device		= bnxt_flash_device,
1644 	.get_eeprom_len         = bnxt_get_eeprom_len,
1645 	.get_eeprom             = bnxt_get_eeprom,
1646 	.set_eeprom		= bnxt_set_eeprom,
1647 	.get_link		= bnxt_get_link,
1648 	.get_eee		= bnxt_get_eee,
1649 	.set_eee		= bnxt_set_eee,
1650 	.get_module_info	= bnxt_get_module_info,
1651 	.get_module_eeprom	= bnxt_get_module_eeprom,
1652 };
1653