xref: /linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c (revision 93a3545d812ae7cfe4426374e00a7d8f64ac02e0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2013-2015 Chelsio Communications.  All rights reserved.
4  */
5 
6 #include <linux/firmware.h>
7 #include <linux/mdio.h>
8 
9 #include "cxgb4.h"
10 #include "t4_regs.h"
11 #include "t4fw_api.h"
12 #include "cxgb4_cudbg.h"
13 #include "cxgb4_filter.h"
14 #include "cxgb4_tc_flower.h"
15 
16 #define EEPROM_MAGIC 0x38E2F10C
17 
18 static u32 get_msglevel(struct net_device *dev)
19 {
20 	return netdev2adap(dev)->msg_enable;
21 }
22 
23 static void set_msglevel(struct net_device *dev, u32 val)
24 {
25 	netdev2adap(dev)->msg_enable = val;
26 }
27 
28 static const char * const flash_region_strings[] = {
29 	"All",
30 	"Firmware",
31 	"PHY Firmware",
32 	"Boot",
33 	"Boot CFG",
34 };
35 
36 static const char stats_strings[][ETH_GSTRING_LEN] = {
37 	"tx_octets_ok           ",
38 	"tx_frames_ok           ",
39 	"tx_broadcast_frames    ",
40 	"tx_multicast_frames    ",
41 	"tx_unicast_frames      ",
42 	"tx_error_frames        ",
43 
44 	"tx_frames_64           ",
45 	"tx_frames_65_to_127    ",
46 	"tx_frames_128_to_255   ",
47 	"tx_frames_256_to_511   ",
48 	"tx_frames_512_to_1023  ",
49 	"tx_frames_1024_to_1518 ",
50 	"tx_frames_1519_to_max  ",
51 
52 	"tx_frames_dropped      ",
53 	"tx_pause_frames        ",
54 	"tx_ppp0_frames         ",
55 	"tx_ppp1_frames         ",
56 	"tx_ppp2_frames         ",
57 	"tx_ppp3_frames         ",
58 	"tx_ppp4_frames         ",
59 	"tx_ppp5_frames         ",
60 	"tx_ppp6_frames         ",
61 	"tx_ppp7_frames         ",
62 
63 	"rx_octets_ok           ",
64 	"rx_frames_ok           ",
65 	"rx_broadcast_frames    ",
66 	"rx_multicast_frames    ",
67 	"rx_unicast_frames      ",
68 
69 	"rx_frames_too_long     ",
70 	"rx_jabber_errors       ",
71 	"rx_fcs_errors          ",
72 	"rx_length_errors       ",
73 	"rx_symbol_errors       ",
74 	"rx_runt_frames         ",
75 
76 	"rx_frames_64           ",
77 	"rx_frames_65_to_127    ",
78 	"rx_frames_128_to_255   ",
79 	"rx_frames_256_to_511   ",
80 	"rx_frames_512_to_1023  ",
81 	"rx_frames_1024_to_1518 ",
82 	"rx_frames_1519_to_max  ",
83 
84 	"rx_pause_frames        ",
85 	"rx_ppp0_frames         ",
86 	"rx_ppp1_frames         ",
87 	"rx_ppp2_frames         ",
88 	"rx_ppp3_frames         ",
89 	"rx_ppp4_frames         ",
90 	"rx_ppp5_frames         ",
91 	"rx_ppp6_frames         ",
92 	"rx_ppp7_frames         ",
93 
94 	"rx_bg0_frames_dropped  ",
95 	"rx_bg1_frames_dropped  ",
96 	"rx_bg2_frames_dropped  ",
97 	"rx_bg3_frames_dropped  ",
98 	"rx_bg0_frames_trunc    ",
99 	"rx_bg1_frames_trunc    ",
100 	"rx_bg2_frames_trunc    ",
101 	"rx_bg3_frames_trunc    ",
102 
103 	"tso                    ",
104 	"uso                    ",
105 	"tx_csum_offload        ",
106 	"rx_csum_good           ",
107 	"vlan_extractions       ",
108 	"vlan_insertions        ",
109 	"gro_packets            ",
110 	"gro_merged             ",
111 };
112 
113 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
114 	"db_drop                ",
115 	"db_full                ",
116 	"db_empty               ",
117 	"write_coal_success     ",
118 	"write_coal_fail        ",
119 #ifdef CONFIG_CHELSIO_TLS_DEVICE
120 	"tx_tls_encrypted_packets",
121 	"tx_tls_encrypted_bytes  ",
122 	"tx_tls_ctx              ",
123 	"tx_tls_ooo              ",
124 	"tx_tls_skip_no_sync_data",
125 	"tx_tls_drop_no_sync_data",
126 	"tx_tls_drop_bypass_req  ",
127 #endif
128 };
129 
130 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
131 	"-------Loopback----------- ",
132 	"octets_ok              ",
133 	"frames_ok              ",
134 	"bcast_frames           ",
135 	"mcast_frames           ",
136 	"ucast_frames           ",
137 	"error_frames           ",
138 	"frames_64              ",
139 	"frames_65_to_127       ",
140 	"frames_128_to_255      ",
141 	"frames_256_to_511      ",
142 	"frames_512_to_1023     ",
143 	"frames_1024_to_1518    ",
144 	"frames_1519_to_max     ",
145 	"frames_dropped         ",
146 	"bg0_frames_dropped     ",
147 	"bg1_frames_dropped     ",
148 	"bg2_frames_dropped     ",
149 	"bg3_frames_dropped     ",
150 	"bg0_frames_trunc       ",
151 	"bg1_frames_trunc       ",
152 	"bg2_frames_trunc       ",
153 	"bg3_frames_trunc       ",
154 };
155 
156 static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
157 	[PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
158 };
159 
160 static int get_sset_count(struct net_device *dev, int sset)
161 {
162 	switch (sset) {
163 	case ETH_SS_STATS:
164 		return ARRAY_SIZE(stats_strings) +
165 		       ARRAY_SIZE(adapter_stats_strings) +
166 		       ARRAY_SIZE(loopback_stats_strings);
167 	case ETH_SS_PRIV_FLAGS:
168 		return ARRAY_SIZE(cxgb4_priv_flags_strings);
169 	default:
170 		return -EOPNOTSUPP;
171 	}
172 }
173 
174 static int get_regs_len(struct net_device *dev)
175 {
176 	struct adapter *adap = netdev2adap(dev);
177 
178 	return t4_get_regs_len(adap);
179 }
180 
181 static int get_eeprom_len(struct net_device *dev)
182 {
183 	return EEPROMSIZE;
184 }
185 
186 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
187 {
188 	struct adapter *adapter = netdev2adap(dev);
189 	u32 exprom_vers;
190 
191 	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
192 	strlcpy(info->bus_info, pci_name(adapter->pdev),
193 		sizeof(info->bus_info));
194 	info->regdump_len = get_regs_len(dev);
195 
196 	if (adapter->params.fw_vers)
197 		snprintf(info->fw_version, sizeof(info->fw_version),
198 			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
199 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
200 			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
201 			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
202 			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
203 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
204 			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
205 			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
206 			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
207 
208 	if (!t4_get_exprom_version(adapter, &exprom_vers))
209 		snprintf(info->erom_version, sizeof(info->erom_version),
210 			 "%u.%u.%u.%u",
211 			 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
212 			 FW_HDR_FW_VER_MINOR_G(exprom_vers),
213 			 FW_HDR_FW_VER_MICRO_G(exprom_vers),
214 			 FW_HDR_FW_VER_BUILD_G(exprom_vers));
215 	info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
216 }
217 
218 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
219 {
220 	if (stringset == ETH_SS_STATS) {
221 		memcpy(data, stats_strings, sizeof(stats_strings));
222 		data += sizeof(stats_strings);
223 		memcpy(data, adapter_stats_strings,
224 		       sizeof(adapter_stats_strings));
225 		data += sizeof(adapter_stats_strings);
226 		memcpy(data, loopback_stats_strings,
227 		       sizeof(loopback_stats_strings));
228 	} else if (stringset == ETH_SS_PRIV_FLAGS) {
229 		memcpy(data, cxgb4_priv_flags_strings,
230 		       sizeof(cxgb4_priv_flags_strings));
231 	}
232 }
233 
234 /* port stats maintained per queue of the port. They should be in the same
235  * order as in stats_strings above.
236  */
237 struct queue_port_stats {
238 	u64 tso;
239 	u64 uso;
240 	u64 tx_csum;
241 	u64 rx_csum;
242 	u64 vlan_ex;
243 	u64 vlan_ins;
244 	u64 gro_pkts;
245 	u64 gro_merged;
246 };
247 
248 struct adapter_stats {
249 	u64 db_drop;
250 	u64 db_full;
251 	u64 db_empty;
252 	u64 wc_success;
253 	u64 wc_fail;
254 #ifdef CONFIG_CHELSIO_TLS_DEVICE
255 	u64 tx_tls_encrypted_packets;
256 	u64 tx_tls_encrypted_bytes;
257 	u64 tx_tls_ctx;
258 	u64 tx_tls_ooo;
259 	u64 tx_tls_skip_no_sync_data;
260 	u64 tx_tls_drop_no_sync_data;
261 	u64 tx_tls_drop_bypass_req;
262 #endif
263 };
264 
265 static void collect_sge_port_stats(const struct adapter *adap,
266 				   const struct port_info *p,
267 				   struct queue_port_stats *s)
268 {
269 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
270 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
271 	struct sge_eohw_txq *eohw_tx;
272 	unsigned int i;
273 
274 	memset(s, 0, sizeof(*s));
275 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
276 		s->tso += tx->tso;
277 		s->uso += tx->uso;
278 		s->tx_csum += tx->tx_cso;
279 		s->rx_csum += rx->stats.rx_cso;
280 		s->vlan_ex += rx->stats.vlan_ex;
281 		s->vlan_ins += tx->vlan_ins;
282 		s->gro_pkts += rx->stats.lro_pkts;
283 		s->gro_merged += rx->stats.lro_merged;
284 	}
285 
286 	if (adap->sge.eohw_txq) {
287 		eohw_tx = &adap->sge.eohw_txq[p->first_qset];
288 		for (i = 0; i < p->nqsets; i++, eohw_tx++) {
289 			s->tso += eohw_tx->tso;
290 			s->uso += eohw_tx->uso;
291 			s->tx_csum += eohw_tx->tx_cso;
292 			s->vlan_ins += eohw_tx->vlan_ins;
293 		}
294 	}
295 }
296 
297 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
298 {
299 	u64 val1, val2;
300 
301 	memset(s, 0, sizeof(*s));
302 
303 	s->db_drop = adap->db_stats.db_drop;
304 	s->db_full = adap->db_stats.db_full;
305 	s->db_empty = adap->db_stats.db_empty;
306 
307 	if (!is_t4(adap->params.chip)) {
308 		int v;
309 
310 		v = t4_read_reg(adap, SGE_STAT_CFG_A);
311 		if (STATSOURCE_T5_G(v) == 7) {
312 			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
313 			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
314 			s->wc_success = val1 - val2;
315 			s->wc_fail = val2;
316 		}
317 	}
318 }
319 
320 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
321 		      u64 *data)
322 {
323 	struct port_info *pi = netdev_priv(dev);
324 	struct adapter *adapter = pi->adapter;
325 	struct lb_port_stats s;
326 	int i;
327 	u64 *p0;
328 
329 	t4_get_port_stats_offset(adapter, pi->tx_chan,
330 				 (struct port_stats *)data,
331 				 &pi->stats_base);
332 
333 	data += sizeof(struct port_stats) / sizeof(u64);
334 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
335 	data += sizeof(struct queue_port_stats) / sizeof(u64);
336 	collect_adapter_stats(adapter, (struct adapter_stats *)data);
337 	data += sizeof(struct adapter_stats) / sizeof(u64);
338 
339 	*data++ = (u64)pi->port_id;
340 	memset(&s, 0, sizeof(s));
341 	t4_get_lb_stats(adapter, pi->port_id, &s);
342 
343 	p0 = &s.octets;
344 	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
345 		*data++ = (unsigned long long)*p0++;
346 }
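/* Note on layout: get_stats() above fills the "ethtool -S" buffer with the
 * hardware port stats, then the per-queue SGE stats, then the adapter-wide
 * stats, and finally the port_id (in the "-------Loopback-----------" slot)
 * followed by the loopback counters.  This mirrors the concatenation of
 * stats_strings, adapter_stats_strings and loopback_stats_strings returned
 * by get_strings().  Example (interface name is illustrative):
 *
 *	ethtool -S eth0
 */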
347 
348 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
349 		     void *buf)
350 {
351 	struct adapter *adap = netdev2adap(dev);
352 	size_t buf_size;
353 
354 	buf_size = t4_get_regs_len(adap);
355 	regs->version = mk_adap_vers(adap);
356 	t4_get_regs(adap, buf, buf_size);
357 }
358 
359 static int restart_autoneg(struct net_device *dev)
360 {
361 	struct port_info *p = netdev_priv(dev);
362 
363 	if (!netif_running(dev))
364 		return -EAGAIN;
365 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
366 		return -EINVAL;
367 	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
368 	return 0;
369 }
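/* restart_autoneg() above backs "ethtool -r", e.g. (illustrative name):
 *
 *	ethtool -r eth0
 *
 * The interface must be running and autonegotiation enabled, otherwise
 * -EAGAIN or -EINVAL is returned as coded above.
 */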
370 
371 static int identify_port(struct net_device *dev,
372 			 enum ethtool_phys_id_state state)
373 {
374 	unsigned int val;
375 	struct adapter *adap = netdev2adap(dev);
376 
377 	if (state == ETHTOOL_ID_ACTIVE)
378 		val = 0xffff;
379 	else if (state == ETHTOOL_ID_INACTIVE)
380 		val = 0;
381 	else
382 		return -EINVAL;
383 
384 	return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
385 }
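/* identify_port() above implements the "ethtool -p" LED blink, e.g.
 * (illustrative name and duration in seconds):
 *
 *	ethtool -p eth0 5
 *
 * The ethtool core brackets the requested duration with ETHTOOL_ID_ACTIVE
 * and ETHTOOL_ID_INACTIVE calls into this handler.
 */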
386 
387 /**
388  *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool Port Type
389  *	@port_type: Firmware Port Type
390  *	@mod_type: Firmware Module Type
391  *
392  *	Translate Firmware Port/Module type to Ethtool Port Type.
393  */
394 static int from_fw_port_mod_type(enum fw_port_type port_type,
395 				 enum fw_port_module_type mod_type)
396 {
397 	if (port_type == FW_PORT_TYPE_BT_SGMII ||
398 	    port_type == FW_PORT_TYPE_BT_XFI ||
399 	    port_type == FW_PORT_TYPE_BT_XAUI) {
400 		return PORT_TP;
401 	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
402 		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
403 		return PORT_FIBRE;
404 	} else if (port_type == FW_PORT_TYPE_SFP ||
405 		   port_type == FW_PORT_TYPE_QSFP_10G ||
406 		   port_type == FW_PORT_TYPE_QSA ||
407 		   port_type == FW_PORT_TYPE_QSFP ||
408 		   port_type == FW_PORT_TYPE_CR4_QSFP ||
409 		   port_type == FW_PORT_TYPE_CR_QSFP ||
410 		   port_type == FW_PORT_TYPE_CR2_QSFP ||
411 		   port_type == FW_PORT_TYPE_SFP28) {
412 		if (mod_type == FW_PORT_MOD_TYPE_LR ||
413 		    mod_type == FW_PORT_MOD_TYPE_SR ||
414 		    mod_type == FW_PORT_MOD_TYPE_ER ||
415 		    mod_type == FW_PORT_MOD_TYPE_LRM)
416 			return PORT_FIBRE;
417 		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
418 			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
419 			return PORT_DA;
420 		else
421 			return PORT_OTHER;
422 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
423 		   port_type == FW_PORT_TYPE_KR_SFP28 ||
424 		   port_type == FW_PORT_TYPE_KR_XLAUI) {
425 		return PORT_NONE;
426 	}
427 
428 	return PORT_OTHER;
429 }
430 
431 /**
432  *	speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
433  *	@speed: speed in Mb/s
434  *
435  *	Translates a specific Port Speed into a Firmware Port Capabilities
436  *	value.
437  */
438 static unsigned int speed_to_fw_caps(int speed)
439 {
440 	if (speed == 100)
441 		return FW_PORT_CAP32_SPEED_100M;
442 	if (speed == 1000)
443 		return FW_PORT_CAP32_SPEED_1G;
444 	if (speed == 10000)
445 		return FW_PORT_CAP32_SPEED_10G;
446 	if (speed == 25000)
447 		return FW_PORT_CAP32_SPEED_25G;
448 	if (speed == 40000)
449 		return FW_PORT_CAP32_SPEED_40G;
450 	if (speed == 50000)
451 		return FW_PORT_CAP32_SPEED_50G;
452 	if (speed == 100000)
453 		return FW_PORT_CAP32_SPEED_100G;
454 	if (speed == 200000)
455 		return FW_PORT_CAP32_SPEED_200G;
456 	if (speed == 400000)
457 		return FW_PORT_CAP32_SPEED_400G;
458 	return 0;
459 }
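/* speed_to_fw_caps() takes the ethtool speed in Mb/s, so e.g. a forced-speed
 * request such as (illustrative interface name)
 *
 *	ethtool -s eth0 speed 25000 autoneg off
 *
 * arrives in set_link_ksettings() below with base->speed == 25000 and is
 * mapped here to FW_PORT_CAP32_SPEED_25G.
 */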
460 
461 /**
462  *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
463  *	@port_type: Firmware Port Type
464  *	@fw_caps: Firmware Port Capabilities
465  *	@link_mode_mask: ethtool Link Mode Mask
466  *
467  *	Translate a Firmware Port Capabilities specification to an ethtool
468  *	Link Mode Mask.
469  */
470 static void fw_caps_to_lmm(enum fw_port_type port_type,
471 			   fw_port_cap32_t fw_caps,
472 			   unsigned long *link_mode_mask)
473 {
474 	#define SET_LMM(__lmm_name) \
475 		do { \
476 			__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
477 				  link_mode_mask); \
478 		} while (0)
479 
480 	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
481 		do { \
482 			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
483 				SET_LMM(__lmm_name); \
484 		} while (0)
485 
486 	switch (port_type) {
487 	case FW_PORT_TYPE_BT_SGMII:
488 	case FW_PORT_TYPE_BT_XFI:
489 	case FW_PORT_TYPE_BT_XAUI:
490 		SET_LMM(TP);
491 		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
492 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
493 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
494 		break;
495 
496 	case FW_PORT_TYPE_KX4:
497 	case FW_PORT_TYPE_KX:
498 		SET_LMM(Backplane);
499 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
500 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
501 		break;
502 
503 	case FW_PORT_TYPE_KR:
504 		SET_LMM(Backplane);
505 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
506 		break;
507 
508 	case FW_PORT_TYPE_BP_AP:
509 		SET_LMM(Backplane);
510 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
511 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
512 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
513 		break;
514 
515 	case FW_PORT_TYPE_BP4_AP:
516 		SET_LMM(Backplane);
517 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
518 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
519 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
520 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
521 		break;
522 
523 	case FW_PORT_TYPE_FIBER_XFI:
524 	case FW_PORT_TYPE_FIBER_XAUI:
525 	case FW_PORT_TYPE_SFP:
526 	case FW_PORT_TYPE_QSFP_10G:
527 	case FW_PORT_TYPE_QSA:
528 		SET_LMM(FIBRE);
529 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
530 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
531 		break;
532 
533 	case FW_PORT_TYPE_BP40_BA:
534 	case FW_PORT_TYPE_QSFP:
535 		SET_LMM(FIBRE);
536 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
537 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
538 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
539 		break;
540 
541 	case FW_PORT_TYPE_CR_QSFP:
542 	case FW_PORT_TYPE_SFP28:
543 		SET_LMM(FIBRE);
544 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
545 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
546 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
547 		break;
548 
549 	case FW_PORT_TYPE_KR_SFP28:
550 		SET_LMM(Backplane);
551 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
552 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
553 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
554 		break;
555 
556 	case FW_PORT_TYPE_KR_XLAUI:
557 		SET_LMM(Backplane);
558 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
559 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
560 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
561 		break;
562 
563 	case FW_PORT_TYPE_CR2_QSFP:
564 		SET_LMM(FIBRE);
565 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
566 		break;
567 
568 	case FW_PORT_TYPE_KR4_100G:
569 	case FW_PORT_TYPE_CR4_QSFP:
570 		SET_LMM(FIBRE);
571 		FW_CAPS_TO_LMM(SPEED_1G,  1000baseT_Full);
572 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
573 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
574 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
575 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
576 		FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
577 		break;
578 
579 	default:
580 		break;
581 	}
582 
583 	if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
584 		FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
585 		FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
586 	} else {
587 		SET_LMM(FEC_NONE);
588 	}
589 
590 	FW_CAPS_TO_LMM(ANEG, Autoneg);
591 	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
592 	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
593 
594 	#undef FW_CAPS_TO_LMM
595 	#undef SET_LMM
596 }
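/* For clarity, a FW_CAPS_TO_LMM() use above such as
 *
 *	FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
 *
 * expands (via SET_LMM()) to roughly
 *
 *	if (fw_caps & FW_PORT_CAP32_SPEED_10G)
 *		__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
 *			  link_mode_mask);
 */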
597 
598 /**
599  *	lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
600  *	capabilities
601  *	@link_mode_mask: ethtool Link Mode Mask
602  *
603  *	Translate ethtool Link Mode Mask into a Firmware Port capabilities
604  *	value.
605  */
606 static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
607 {
608 	unsigned int fw_caps = 0;
609 
610 	#define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
611 		do { \
612 			if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
613 				     link_mode_mask)) \
614 				fw_caps |= FW_PORT_CAP32_ ## __fw_name; \
615 		} while (0)
616 
617 	LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
618 	LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
619 	LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
620 	LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
621 	LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
622 	LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G);
623 	LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);
624 
625 	#undef LMM_TO_FW_CAPS
626 
627 	return fw_caps;
628 }
629 
630 static int get_link_ksettings(struct net_device *dev,
631 			      struct ethtool_link_ksettings *link_ksettings)
632 {
633 	struct port_info *pi = netdev_priv(dev);
634 	struct ethtool_link_settings *base = &link_ksettings->base;
635 
636 	/* For now, the Firmware doesn't send up Port State changes
637 	 * when the Virtual Interface attached to the Port is down.  So
638 	 * if it's down, explicitly poll the Firmware for any changes.
639 	 */
640 	if (!netif_running(dev))
641 		(void)t4_update_port_info(pi);
642 
643 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
644 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
645 	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
646 
647 	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
648 
649 	if (pi->mdio_addr >= 0) {
650 		base->phy_address = pi->mdio_addr;
651 		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
652 				      ? ETH_MDIO_SUPPORTS_C22
653 				      : ETH_MDIO_SUPPORTS_C45);
654 	} else {
655 		base->phy_address = 255;
656 		base->mdio_support = 0;
657 	}
658 
659 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
660 		       link_ksettings->link_modes.supported);
661 	fw_caps_to_lmm(pi->port_type,
662 		       t4_link_acaps(pi->adapter,
663 				     pi->lport,
664 				     &pi->link_cfg),
665 		       link_ksettings->link_modes.advertising);
666 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
667 		       link_ksettings->link_modes.lp_advertising);
668 
669 	base->speed = (netif_carrier_ok(dev)
670 		       ? pi->link_cfg.speed
671 		       : SPEED_UNKNOWN);
672 	base->duplex = DUPLEX_FULL;
673 
674 	base->autoneg = pi->link_cfg.autoneg;
675 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
676 		ethtool_link_ksettings_add_link_mode(link_ksettings,
677 						     supported, Autoneg);
678 	if (pi->link_cfg.autoneg)
679 		ethtool_link_ksettings_add_link_mode(link_ksettings,
680 						     advertising, Autoneg);
681 
682 	return 0;
683 }
684 
685 static int set_link_ksettings(struct net_device *dev,
686 			    const struct ethtool_link_ksettings *link_ksettings)
687 {
688 	struct port_info *pi = netdev_priv(dev);
689 	struct link_config *lc = &pi->link_cfg;
690 	const struct ethtool_link_settings *base = &link_ksettings->base;
691 	struct link_config old_lc;
692 	unsigned int fw_caps;
693 	int ret = 0;
694 
695 	/* only full-duplex supported */
696 	if (base->duplex != DUPLEX_FULL)
697 		return -EINVAL;
698 
699 	old_lc = *lc;
700 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
701 	    base->autoneg == AUTONEG_DISABLE) {
702 		fw_caps = speed_to_fw_caps(base->speed);
703 
704 		/* Speed must be supported by Physical Port Capabilities. */
705 		if (!(lc->pcaps & fw_caps))
706 			return -EINVAL;
707 
708 		lc->speed_caps = fw_caps;
709 		lc->acaps = fw_caps;
710 	} else {
711 		fw_caps =
712 			lmm_to_fw_caps(link_ksettings->link_modes.advertising);
713 		if (!(lc->pcaps & fw_caps))
714 			return -EINVAL;
715 		lc->speed_caps = 0;
716 		lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
717 	}
718 	lc->autoneg = base->autoneg;
719 
720 	/* If the firmware rejects the Link Configuration request, back out
721 	 * the changes and report the error.
722 	 */
723 	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
724 	if (ret)
725 		*lc = old_lc;
726 
727 	return ret;
728 }
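/* set_link_ksettings() above is reached via "ethtool -s".  Two illustrative
 * invocations (interface name is a placeholder):
 *
 *	ethtool -s eth0 speed 100000 duplex full autoneg off
 *	ethtool -s eth0 autoneg on
 *
 * Only full duplex is accepted, and a forced speed must be present in the
 * Physical Port Capabilities or -EINVAL is returned.
 */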
729 
730 /* Translate the Firmware FEC value into the ethtool value. */
731 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
732 {
733 	unsigned int eth_fec = 0;
734 
735 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
736 		eth_fec |= ETHTOOL_FEC_RS;
737 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
738 		eth_fec |= ETHTOOL_FEC_BASER;
739 
740 	/* if nothing is set, then FEC is off */
741 	if (!eth_fec)
742 		eth_fec = ETHTOOL_FEC_OFF;
743 
744 	return eth_fec;
745 }
746 
747 /* Translate Common Code FEC value into ethtool value. */
748 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
749 {
750 	unsigned int eth_fec = 0;
751 
752 	if (cc_fec & FEC_AUTO)
753 		eth_fec |= ETHTOOL_FEC_AUTO;
754 	if (cc_fec & FEC_RS)
755 		eth_fec |= ETHTOOL_FEC_RS;
756 	if (cc_fec & FEC_BASER_RS)
757 		eth_fec |= ETHTOOL_FEC_BASER;
758 
759 	/* if nothing is set, then FEC is off */
760 	if (!eth_fec)
761 		eth_fec = ETHTOOL_FEC_OFF;
762 
763 	return eth_fec;
764 }
765 
766 /* Translate ethtool FEC value into Common Code value. */
767 static inline unsigned int eth_to_cc_fec(unsigned int eth_fec)
768 {
769 	unsigned int cc_fec = 0;
770 
771 	if (eth_fec & ETHTOOL_FEC_OFF)
772 		return cc_fec;
773 
774 	if (eth_fec & ETHTOOL_FEC_AUTO)
775 		cc_fec |= FEC_AUTO;
776 	if (eth_fec & ETHTOOL_FEC_RS)
777 		cc_fec |= FEC_RS;
778 	if (eth_fec & ETHTOOL_FEC_BASER)
779 		cc_fec |= FEC_BASER_RS;
780 
781 	return cc_fec;
782 }
783 
784 static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
785 {
786 	const struct port_info *pi = netdev_priv(dev);
787 	const struct link_config *lc = &pi->link_cfg;
788 
789 	/* Translate the Firmware FEC Support into the ethtool value.  We
790 	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
791 	 * any FEC is supported.
792 	 */
793 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
794 	if (fec->fec != ETHTOOL_FEC_OFF)
795 		fec->fec |= ETHTOOL_FEC_AUTO;
796 
797 	/* Translate the current internal FEC parameters into the
798 	 * ethtool values.
799 	 */
800 	fec->active_fec = cc_to_eth_fec(lc->fec);
801 
802 	return 0;
803 }
804 
805 static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
806 {
807 	struct port_info *pi = netdev_priv(dev);
808 	struct link_config *lc = &pi->link_cfg;
809 	struct link_config old_lc;
810 	int ret;
811 
812 	/* Save old Link Configuration in case the L1 Configure below
813 	 * fails.
814 	 */
815 	old_lc = *lc;
816 
817 	/* Try to perform the L1 Configure and return the result of that
818 	 * effort.  If it fails, revert the attempted change.
819 	 */
820 	lc->requested_fec = eth_to_cc_fec(fec->fec);
821 	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox,
822 			    pi->tx_chan, lc);
823 	if (ret)
824 		*lc = old_lc;
825 	return ret;
826 }
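/* get_fecparam()/set_fecparam() above back the FEC ethtool commands, e.g.
 * (illustrative name):
 *
 *	ethtool --show-fec eth0
 *	ethtool --set-fec eth0 encoding rs
 *
 * A failed L1 Configure restores the previous link_config, as coded above.
 */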
827 
828 static void get_pauseparam(struct net_device *dev,
829 			   struct ethtool_pauseparam *epause)
830 {
831 	struct port_info *p = netdev_priv(dev);
832 
833 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
834 	epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
835 	epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
836 }
837 
838 static int set_pauseparam(struct net_device *dev,
839 			  struct ethtool_pauseparam *epause)
840 {
841 	struct port_info *p = netdev_priv(dev);
842 	struct link_config *lc = &p->link_cfg;
843 
844 	if (epause->autoneg == AUTONEG_DISABLE)
845 		lc->requested_fc = 0;
846 	else if (lc->pcaps & FW_PORT_CAP32_ANEG)
847 		lc->requested_fc = PAUSE_AUTONEG;
848 	else
849 		return -EINVAL;
850 
851 	if (epause->rx_pause)
852 		lc->requested_fc |= PAUSE_RX;
853 	if (epause->tx_pause)
854 		lc->requested_fc |= PAUSE_TX;
855 	if (netif_running(dev))
856 		return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
857 				     lc);
858 	return 0;
859 }
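/* set_pauseparam() above services "ethtool -A", e.g. (illustrative name):
 *
 *	ethtool -A eth0 autoneg on rx on tx on
 *
 * Note that disabling pause autonegotiation clears requested_fc before the
 * RX/TX pause bits are re-applied.
 */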
860 
861 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
862 {
863 	const struct port_info *pi = netdev_priv(dev);
864 	const struct sge *s = &pi->adapter->sge;
865 
866 	e->rx_max_pending = MAX_RX_BUFFERS;
867 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
868 	e->rx_jumbo_max_pending = 0;
869 	e->tx_max_pending = MAX_TXQ_ENTRIES;
870 
871 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
872 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
873 	e->rx_jumbo_pending = 0;
874 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
875 }
876 
877 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
878 {
879 	int i;
880 	const struct port_info *pi = netdev_priv(dev);
881 	struct adapter *adapter = pi->adapter;
882 	struct sge *s = &adapter->sge;
883 
884 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
885 	    e->tx_pending > MAX_TXQ_ENTRIES ||
886 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
887 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
888 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
889 		return -EINVAL;
890 
891 	if (adapter->flags & CXGB4_FULL_INIT_DONE)
892 		return -EBUSY;
893 
894 	for (i = 0; i < pi->nqsets; ++i) {
895 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
896 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
897 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
898 	}
899 	return 0;
900 }
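/* get_sge_param()/set_sge_param() above map to "ethtool -g"/"ethtool -G",
 * e.g. (illustrative name and ring sizes):
 *
 *	ethtool -G eth0 rx 1024 rx-mini 512 tx 1024
 *
 * Ring sizes can only be changed before the adapter is fully initialized;
 * afterwards -EBUSY is returned.
 */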
901 
902 /**
903  * set_rx_intr_params - set a net device's RX interrupt hold-off parameters
904  * @dev: the network device
905  * @us: the hold-off time in us, or 0 to disable timer
906  * @cnt: the hold-off packet count, or 0 to disable counter
907  *
908  * Set the RX interrupt hold-off parameters for a network device.
909  */
910 static int set_rx_intr_params(struct net_device *dev,
911 			      unsigned int us, unsigned int cnt)
912 {
913 	int i, err;
914 	struct port_info *pi = netdev_priv(dev);
915 	struct adapter *adap = pi->adapter;
916 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
917 
918 	for (i = 0; i < pi->nqsets; i++, q++) {
919 		err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
920 		if (err)
921 			return err;
922 	}
923 	return 0;
924 }
925 
926 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
927 {
928 	int i;
929 	struct port_info *pi = netdev_priv(dev);
930 	struct adapter *adap = pi->adapter;
931 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
932 
933 	for (i = 0; i < pi->nqsets; i++, q++)
934 		q->rspq.adaptive_rx = adaptive_rx;
935 
936 	return 0;
937 }
938 
939 static int get_adaptive_rx_setting(struct net_device *dev)
940 {
941 	struct port_info *pi = netdev_priv(dev);
942 	struct adapter *adap = pi->adapter;
943 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
944 
945 	return q->rspq.adaptive_rx;
946 }
947 
948 /* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
949  * Ethernet TX Queues.
950  */
951 static int get_dbqtimer_tick(struct net_device *dev)
952 {
953 	struct port_info *pi = netdev_priv(dev);
954 	struct adapter *adap = pi->adapter;
955 
956 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
957 		return 0;
958 
959 	return adap->sge.dbqtimer_tick;
960 }
961 
962 /* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
963  * associated with a Network Device.
964  */
965 static int get_dbqtimer(struct net_device *dev)
966 {
967 	struct port_info *pi = netdev_priv(dev);
968 	struct adapter *adap = pi->adapter;
969 	struct sge_eth_txq *txq;
970 
971 	txq = &adap->sge.ethtxq[pi->first_qset];
972 
973 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
974 		return 0;
975 
976 	/* all of the TX Queues use the same Timer Index */
977 	return adap->sge.dbqtimer_val[txq->dbqtimerix];
978 }
979 
980 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
981  * Queues.  This is the fundamental "Tick" that sets the scale of values which
982  * can be used.  Individual Ethernet TX Queues index into a relatively small
983  * array of Tick Multipliers.  Changing the base Tick will thus change all of
984  * the resulting Timer Values associated with those multipliers for all
985  * Ethernet TX Queues.
986  */
987 static int set_dbqtimer_tick(struct net_device *dev, int usecs)
988 {
989 	struct port_info *pi = netdev_priv(dev);
990 	struct adapter *adap = pi->adapter;
991 	struct sge *s = &adap->sge;
992 	u32 param, val;
993 	int ret;
994 
995 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
996 		return 0;
997 
998 	/* return early if it's the same Timer Tick we're already using */
999 	if (s->dbqtimer_tick == usecs)
1000 		return 0;
1001 
1002 	/* attempt to set the new Timer Tick value */
1003 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1004 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
1005 	val = usecs;
1006 	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
1007 	if (ret)
1008 		return ret;
1009 	s->dbqtimer_tick = usecs;
1010 
1011 	/* if successful, reread resulting dependent Timer values */
1012 	ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
1013 				    s->dbqtimer_val);
1014 	return ret;
1015 }
1016 
1017 /* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
1018  * associated with a Network Device.  There is a relatively small array of
1019  * possible Timer Values so we need to pick the closest value available.
1020  */
1021 static int set_dbqtimer(struct net_device *dev, int usecs)
1022 {
1023 	int qix, timerix, min_timerix, delta, min_delta;
1024 	struct port_info *pi = netdev_priv(dev);
1025 	struct adapter *adap = pi->adapter;
1026 	struct sge *s = &adap->sge;
1027 	struct sge_eth_txq *txq;
1028 	u32 param, val;
1029 	int ret;
1030 
1031 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1032 		return 0;
1033 
1034 	/* Find the SGE Doorbell Timer Value that's closest to the requested
1035 	 * value.
1036 	 */
1037 	min_delta = INT_MAX;
1038 	min_timerix = 0;
1039 	for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
1040 		delta = s->dbqtimer_val[timerix] - usecs;
1041 		if (delta < 0)
1042 			delta = -delta;
1043 		if (delta < min_delta) {
1044 			min_delta = delta;
1045 			min_timerix = timerix;
1046 		}
1047 	}
1048 
1049 	/* Return early if it's the same Timer Index we're already using.
1050 	 * We use the same Timer Index for all of the TX Queues for an
1051 	 * interface so it's only necessary to check the first one.
1052 	 */
1053 	txq = &s->ethtxq[pi->first_qset];
1054 	if (txq->dbqtimerix == min_timerix)
1055 		return 0;
1056 
1057 	for (qix = 0; qix < pi->nqsets; qix++, txq++) {
1058 		if (adap->flags & CXGB4_FULL_INIT_DONE) {
1059 			param =
1060 			 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1061 			  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
1062 			  FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
1063 			val = min_timerix;
1064 			ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
1065 					    1, &param, &val);
1066 			if (ret)
1067 				return ret;
1068 		}
1069 		txq->dbqtimerix = min_timerix;
1070 	}
1071 	return 0;
1072 }
1073 
1074 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
1075  * Queues and the Timer Value for the Ethernet TX Queues associated with a
1076  * Network Device.  Since changing the global Tick changes all of the
1077  * available Timer Values, we need to do this first before selecting the
1078  * resulting closest Timer Value.  Moreover, since the Tick is global,
1079  * changing it affects the Timer Values for all Network Devices on the
1080  * adapter.  So, before changing the Tick, we grab all of the current Timer
1081  * Values for other Network Devices on this Adapter and then attempt to select
1082  * new Timer Values which are close to the old values ...
1083  */
1084 static int set_dbqtimer_tickval(struct net_device *dev,
1085 				int tick_usecs, int timer_usecs)
1086 {
1087 	struct port_info *pi = netdev_priv(dev);
1088 	struct adapter *adap = pi->adapter;
1089 	int timer[MAX_NPORTS];
1090 	unsigned int port;
1091 	int ret;
1092 
1093 	/* Grab the current Timer Values of the adapter's other Network
1094 	 * Interfaces and fill in the new one for this Network Interface.
1095 	 */
1096 	for_each_port(adap, port)
1097 		if (port == pi->port_id)
1098 			timer[port] = timer_usecs;
1099 		else
1100 			timer[port] = get_dbqtimer(adap->port[port]);
1101 
1102 	/* Change the global Tick first ... */
1103 	ret = set_dbqtimer_tick(dev, tick_usecs);
1104 	if (ret)
1105 		return ret;
1106 
1107 	/* ... and then set all of the Network Interface Timer Values ... */
1108 	for_each_port(adap, port) {
1109 		ret = set_dbqtimer(adap->port[port], timer[port]);
1110 		if (ret)
1111 			return ret;
1112 	}
1113 
1114 	return 0;
1115 }
1116 
1117 static int set_coalesce(struct net_device *dev,
1118 			struct ethtool_coalesce *coalesce)
1119 {
1120 	int ret;
1121 
1122 	set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
1123 
1124 	ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
1125 				 coalesce->rx_max_coalesced_frames);
1126 	if (ret)
1127 		return ret;
1128 
1129 	return set_dbqtimer_tickval(dev,
1130 				    coalesce->tx_coalesce_usecs_irq,
1131 				    coalesce->tx_coalesce_usecs);
1132 }
1133 
1134 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1135 {
1136 	const struct port_info *pi = netdev_priv(dev);
1137 	const struct adapter *adap = pi->adapter;
1138 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1139 
1140 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
1141 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
1142 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
1143 	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
1144 	c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
1145 	c->tx_coalesce_usecs = get_dbqtimer(dev);
1146 	return 0;
1147 }
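/* set_coalesce()/get_coalesce() above correspond to "ethtool -C"/"ethtool -c".
 * An illustrative invocation (interface name is a placeholder):
 *
 *	ethtool -C eth0 adaptive-rx on rx-usecs 100 rx-frames 32 \
 *		tx-usecs 50 tx-usecs-irq 10
 *
 * rx-usecs/rx-frames set the RX interrupt hold-off, tx-usecs selects the
 * closest SGE Doorbell Queue Timer Value, and tx-usecs-irq sets the global
 * Doorbell Queue Timer Tick, as wired up in set_coalesce() above.
 */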
1148 
1149 /* The next two routines implement eeprom read/write from physical addresses.
1150  */
1151 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1152 {
1153 	int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1154 
1155 	if (vaddr >= 0)
1156 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1157 	return vaddr < 0 ? vaddr : 0;
1158 }
1159 
1160 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1161 {
1162 	int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1163 
1164 	if (vaddr >= 0)
1165 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1166 	return vaddr < 0 ? vaddr : 0;
1167 }
1168 
1169 #define EEPROM_MAGIC 0x38E2F10C
1170 
1171 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1172 		      u8 *data)
1173 {
1174 	int i, err = 0;
1175 	struct adapter *adapter = netdev2adap(dev);
1176 	u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
1177 
1178 	if (!buf)
1179 		return -ENOMEM;
1180 
1181 	e->magic = EEPROM_MAGIC;
1182 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1183 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1184 
1185 	if (!err)
1186 		memcpy(data, buf + e->offset, e->len);
1187 	kvfree(buf);
1188 	return err;
1189 }
1190 
1191 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1192 		      u8 *data)
1193 {
1194 	u8 *buf;
1195 	int err = 0;
1196 	u32 aligned_offset, aligned_len, *p;
1197 	struct adapter *adapter = netdev2adap(dev);
1198 
1199 	if (eeprom->magic != EEPROM_MAGIC)
1200 		return -EINVAL;
1201 
1202 	aligned_offset = eeprom->offset & ~3;
1203 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1204 
1205 	if (adapter->pf > 0) {
1206 		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1207 
1208 		if (aligned_offset < start ||
1209 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
1210 			return -EPERM;
1211 	}
1212 
1213 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1214 		/* RMW possibly needed for first or last words.
1215 		 */
1216 		buf = kvzalloc(aligned_len, GFP_KERNEL);
1217 		if (!buf)
1218 			return -ENOMEM;
1219 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1220 		if (!err && aligned_len > 4)
1221 			err = eeprom_rd_phys(adapter,
1222 					     aligned_offset + aligned_len - 4,
1223 					     (u32 *)&buf[aligned_len - 4]);
1224 		if (err)
1225 			goto out;
1226 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1227 	} else {
1228 		buf = data;
1229 	}
1230 
1231 	err = t4_seeprom_wp(adapter, false);
1232 	if (err)
1233 		goto out;
1234 
1235 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1236 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
1237 		aligned_offset += 4;
1238 	}
1239 
1240 	if (!err)
1241 		err = t4_seeprom_wp(adapter, true);
1242 out:
1243 	if (buf != data)
1244 		kvfree(buf);
1245 	return err;
1246 }
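/* get_eeprom()/set_eeprom() above implement "ethtool -e"/"ethtool -E" on the
 * serial EEPROM (VPD).  Writes must supply the magic defined above, e.g.
 * (illustrative name, offset and value):
 *
 *	ethtool -e eth0 offset 0 length 64
 *	ethtool -E eth0 magic 0x38E2F10C offset 0x400 value 0x00
 *
 * PFs other than 0 may only write within their own EEPROMPFSIZE window.
 */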
1247 
1248 static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
1249 				       const u8 *data, u32 size)
1250 {
1251 	struct adapter *adap = netdev2adap(netdev);
1252 	int ret;
1253 
1254 	ret = t4_load_bootcfg(adap, data, size);
1255 	if (ret)
1256 		dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");
1257 
1258 	return ret;
1259 }
1260 
1261 static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
1262 				    const u8 *bdata, u32 size)
1263 {
1264 	struct adapter *adap = netdev2adap(netdev);
1265 	unsigned int offset;
1266 	u8 *data;
1267 	int ret;
1268 
1269 	data = kmemdup(bdata, size, GFP_KERNEL);
1270 	if (!data)
1271 		return -ENOMEM;
1272 
1273 	offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));
1274 
1275 	ret = t4_load_boot(adap, data, offset, size);
1276 	if (ret)
1277 		dev_err(adap->pdev_dev, "Failed to load boot image\n");
1278 
1279 	kfree(data);
1280 	return ret;
1281 }
1282 
1283 #define CXGB4_PHY_SIG 0x130000ea
1284 
1285 static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
1286 {
1287 	struct cxgb4_fw_data *header;
1288 
1289 	header = (struct cxgb4_fw_data *)data;
1290 	if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
1291 		return -EINVAL;
1292 
1293 	return 0;
1294 }
1295 
1296 static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
1297 				   const u8 *data, u32 size)
1298 {
1299 	struct adapter *adap = netdev2adap(netdev);
1300 	int ret;
1301 
1302 	ret = cxgb4_validate_phy_image(data, NULL);
1303 	if (ret) {
1304 		dev_err(adap->pdev_dev, "PHY signature mismatch\n");
1305 		return ret;
1306 	}
1307 
1308 	spin_lock_bh(&adap->win0_lock);
1309 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
1310 	spin_unlock_bh(&adap->win0_lock);
1311 	if (ret)
1312 		dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
1313 
1314 	return ret;
1315 }
1316 
1317 static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
1318 				  const u8 *data, u32 size)
1319 {
1320 	struct adapter *adap = netdev2adap(netdev);
1321 	unsigned int mbox = PCIE_FW_MASTER_M + 1;
1322 	int ret;
1323 
1324 	/* If the adapter has been fully initialized, we'll go ahead and try
1325 	 * to get the firmware's cooperation in upgrading to the new firmware
1326 	 * image; otherwise we'll try to do the entire job from the
1327 	 * host ... and we always "force" the operation in this path.
1328 	 */
1329 	if (adap->flags & CXGB4_FULL_INIT_DONE)
1330 		mbox = adap->mbox;
1331 
1332 	ret = t4_fw_upgrade(adap, mbox, data, size, 1);
1333 	if (ret)
1334 		dev_err(adap->pdev_dev,
1335 			"Failed to flash firmware\n");
1336 
1337 	return ret;
1338 }
1339 
1340 static int cxgb4_ethtool_flash_region(struct net_device *netdev,
1341 				      const u8 *data, u32 size, u32 region)
1342 {
1343 	struct adapter *adap = netdev2adap(netdev);
1344 	int ret;
1345 
1346 	switch (region) {
1347 	case CXGB4_ETHTOOL_FLASH_FW:
1348 		ret = cxgb4_ethtool_flash_fw(netdev, data, size);
1349 		break;
1350 	case CXGB4_ETHTOOL_FLASH_PHY:
1351 		ret = cxgb4_ethtool_flash_phy(netdev, data, size);
1352 		break;
1353 	case CXGB4_ETHTOOL_FLASH_BOOT:
1354 		ret = cxgb4_ethtool_flash_boot(netdev, data, size);
1355 		break;
1356 	case CXGB4_ETHTOOL_FLASH_BOOTCFG:
1357 		ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
1358 		break;
1359 	default:
1360 		ret = -EOPNOTSUPP;
1361 		break;
1362 	}
1363 
1364 	if (!ret)
1365 		dev_info(adap->pdev_dev,
1366 			 "loading %s successful, reload cxgb4 driver\n",
1367 			 flash_region_strings[region]);
1368 	return ret;
1369 }
1370 
1371 #define CXGB4_FW_SIG 0x4368656c
1372 #define CXGB4_FW_SIG_OFFSET 0x160
1373 
1374 static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
1375 {
1376 	struct cxgb4_fw_data *header;
1377 
1378 	header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
1379 	if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
1380 		return -EINVAL;
1381 
1382 	if (size)
1383 		*size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;
1384 
1385 	return 0;
1386 }
1387 
1388 static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
1389 {
1390 	struct cxgb4_bootcfg_data *header;
1391 
1392 	header = (struct cxgb4_bootcfg_data *)data;
1393 	if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
1394 		return -EINVAL;
1395 
1396 	return 0;
1397 }
1398 
1399 static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
1400 {
1401 	struct cxgb4_pci_exp_rom_header *exp_header;
1402 	struct cxgb4_pcir_data *pcir_header;
1403 	struct legacy_pci_rom_hdr *header;
1404 	const u8 *cur_header = data;
1405 	u16 pcir_offset;
1406 
1407 	exp_header = (struct cxgb4_pci_exp_rom_header *)data;
1408 
1409 	if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
1410 		return -EINVAL;
1411 
1412 	if (size) {
1413 		do {
1414 			header = (struct legacy_pci_rom_hdr *)cur_header;
1415 			pcir_offset = le16_to_cpu(header->pcir_offset);
1416 			pcir_header = (struct cxgb4_pcir_data *)(cur_header +
1417 				      pcir_offset);
1418 
1419 			*size += header->size512 * 512;
1420 			cur_header += header->size512 * 512;
1421 		} while (!(pcir_header->indicator & CXGB4_HDR_INDI));
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
1428 {
1429 	if (!cxgb4_validate_fw_image(data, size))
1430 		return CXGB4_ETHTOOL_FLASH_FW;
1431 	if (!cxgb4_validate_boot_image(data, size))
1432 		return CXGB4_ETHTOOL_FLASH_BOOT;
1433 	if (!cxgb4_validate_phy_image(data, size))
1434 		return CXGB4_ETHTOOL_FLASH_PHY;
1435 	if (!cxgb4_validate_bootcfg_image(data, size))
1436 		return CXGB4_ETHTOOL_FLASH_BOOTCFG;
1437 
1438 	return -EOPNOTSUPP;
1439 }
1440 
1441 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1442 {
1443 	struct adapter *adap = netdev2adap(netdev);
1444 	const struct firmware *fw;
1445 	unsigned int master;
1446 	u8 master_vld = 0;
1447 	const u8 *fw_data;
1448 	size_t fw_size;
1449 	u32 size = 0;
1450 	u32 pcie_fw;
1451 	int region;
1452 	int ret;
1453 
1454 	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
1455 	master = PCIE_FW_MASTER_G(pcie_fw);
1456 	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
1457 		master_vld = 1;
1458 	/* if csiostor is the master, return early */
1459 	if (master_vld && (master != adap->pf)) {
1460 		dev_warn(adap->pdev_dev,
1461 			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
1462 		return -EOPNOTSUPP;
1463 	}
1464 
1465 	ef->data[sizeof(ef->data) - 1] = '\0';
1466 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1467 	if (ret < 0)
1468 		return ret;
1469 
1470 	fw_data = fw->data;
1471 	fw_size = fw->size;
1472 	if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
1473 		while (fw_size > 0) {
1474 			size = 0;
1475 			region = cxgb4_ethtool_get_flash_region(fw_data, &size);
1476 			if (region < 0 || !size) {
1477 				ret = region;
1478 				goto out_free_fw;
1479 			}
1480 
1481 			ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
1482 							 region);
1483 			if (ret)
1484 				goto out_free_fw;
1485 
1486 			fw_data += size;
1487 			fw_size -= size;
1488 		}
1489 	} else {
1490 		ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
1491 						 ef->region);
1492 	}
1493 
1494 out_free_fw:
1495 	release_firmware(fw);
1496 	return ret;
1497 }
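/* set_flash() above handles "ethtool -f", e.g. (illustrative name and file):
 *
 *	ethtool -f eth0 t6fw.bin
 *
 * With no explicit region (ETHTOOL_FLASH_ALL_REGIONS), the image is parsed
 * and each recognised section (FW, boot, boot cfg, PHY FW) is flashed in
 * turn via cxgb4_ethtool_flash_region().
 */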
1498 
1499 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
1500 {
1501 	struct port_info *pi = netdev_priv(dev);
1502 	struct  adapter *adapter = pi->adapter;
1503 
1504 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1505 				   SOF_TIMESTAMPING_RX_SOFTWARE |
1506 				   SOF_TIMESTAMPING_SOFTWARE;
1507 
1508 	ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
1509 				    SOF_TIMESTAMPING_TX_HARDWARE |
1510 				    SOF_TIMESTAMPING_RAW_HARDWARE;
1511 
1512 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1513 			    (1 << HWTSTAMP_TX_ON);
1514 
1515 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1516 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1517 			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1518 			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1519 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1520 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1521 
1522 	if (adapter->ptp_clock)
1523 		ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
1524 	else
1525 		ts_info->phc_index = -1;
1526 
1527 	return 0;
1528 }
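/* get_ts_info() above is reported through "ethtool -T", e.g. (illustrative
 * name):
 *
 *	ethtool -T eth0
 *
 * listing the software/hardware timestamping modes and the PTP clock index
 * when a PHC is registered.
 */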
1529 
1530 static u32 get_rss_table_size(struct net_device *dev)
1531 {
1532 	const struct port_info *pi = netdev_priv(dev);
1533 
1534 	return pi->rss_size;
1535 }
1536 
1537 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
1538 {
1539 	const struct port_info *pi = netdev_priv(dev);
1540 	unsigned int n = pi->rss_size;
1541 
1542 	if (hfunc)
1543 		*hfunc = ETH_RSS_HASH_TOP;
1544 	if (!p)
1545 		return 0;
1546 	while (n--)
1547 		p[n] = pi->rss[n];
1548 	return 0;
1549 }
1550 
1551 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
1552 			 const u8 hfunc)
1553 {
1554 	unsigned int i;
1555 	struct port_info *pi = netdev_priv(dev);
1556 
1557 	/* We require at least one supported parameter to be changed and no
1558 	 * change in any of the unsupported parameters
1559 	 */
1560 	if (key ||
1561 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1562 		return -EOPNOTSUPP;
1563 	if (!p)
1564 		return 0;
1565 
1566 	/* Interface must be brought up at least once */
1567 	if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
1568 		for (i = 0; i < pi->rss_size; i++)
1569 			pi->rss[i] = p[i];
1570 
1571 		return cxgb4_write_rss(pi, pi->rss);
1572 	}
1573 
1574 	return -EPERM;
1575 }
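/* The RSS indirection table above is exposed via "ethtool -x"/"ethtool -X",
 * e.g. (illustrative name):
 *
 *	ethtool -x eth0
 *	ethtool -X eth0 equal 4
 *
 * Hash key changes are rejected (-EOPNOTSUPP), and the table can only be
 * rewritten once the interface has been brought up at least once.
 */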
1576 
1577 static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
1578 						   u32 ftid)
1579 {
1580 	struct tid_info *t = &adap->tids;
1581 	struct filter_entry *f;
1582 
1583 	if (ftid < t->nhpftids)
1584 		f = &adap->tids.hpftid_tab[ftid];
1585 	else if (ftid < t->nftids)
1586 		f = &adap->tids.ftid_tab[ftid - t->nhpftids];
1587 	else
1588 		f = lookup_tid(&adap->tids, ftid);
1589 
1590 	return f;
1591 }
1592 
1593 static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
1594 				   struct ch_filter_specification *dfs)
1595 {
1596 	switch (dfs->val.proto) {
1597 	case IPPROTO_TCP:
1598 		if (dfs->type)
1599 			fs->flow_type = TCP_V6_FLOW;
1600 		else
1601 			fs->flow_type = TCP_V4_FLOW;
1602 		break;
1603 	case IPPROTO_UDP:
1604 		if (dfs->type)
1605 			fs->flow_type = UDP_V6_FLOW;
1606 		else
1607 			fs->flow_type = UDP_V4_FLOW;
1608 		break;
1609 	}
1610 
1611 	if (dfs->type) {
1612 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
1613 		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
1614 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
1615 		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
1616 		memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
1617 		       sizeof(fs->h_u.tcp_ip6_spec.ip6src));
1618 		memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
1619 		       sizeof(fs->m_u.tcp_ip6_spec.ip6src));
1620 		memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
1621 		       sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
1622 		memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
1623 		       sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
1624 		fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
1625 		fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
1626 	} else {
1627 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
1628 		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
1629 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
1630 		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
1631 		memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
1632 		       sizeof(fs->h_u.tcp_ip4_spec.ip4src));
1633 		memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
1634 		       sizeof(fs->m_u.tcp_ip4_spec.ip4src));
1635 		memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
1636 		       sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
1637 		memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
1638 		       sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
1639 		fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
1640 		fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
1641 	}
1642 	fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
1643 	fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
1644 	fs->flow_type |= FLOW_EXT;
1645 
1646 	if (dfs->action == FILTER_DROP)
1647 		fs->ring_cookie = RX_CLS_FLOW_DISC;
1648 	else
1649 		fs->ring_cookie = dfs->iq;
1650 }
1651 
1652 static int cxgb4_ntuple_get_filter(struct net_device *dev,
1653 				   struct ethtool_rxnfc *cmd,
1654 				   unsigned int loc)
1655 {
1656 	const struct port_info *pi = netdev_priv(dev);
1657 	struct adapter *adap = netdev2adap(dev);
1658 	struct filter_entry *f;
1659 	int ftid;
1660 
1661 	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
1662 		return -EAGAIN;
1663 
1664 	/* Check for maximum filter range */
1665 	if (!adap->ethtool_filters)
1666 		return -EOPNOTSUPP;
1667 
1668 	if (loc >= adap->ethtool_filters->nentries)
1669 		return -ERANGE;
1670 
1671 	if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
1672 		return -ENOENT;
1673 
1674 	ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];
1675 
1676 	/* Fetch filter_entry */
1677 	f = cxgb4_get_filter_entry(adap, ftid);
1678 
1679 	cxgb4_fill_filter_rule(&cmd->fs, &f->fs);
1680 
1681 	return 0;
1682 }
1683 
1684 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1685 		     u32 *rules)
1686 {
1687 	const struct port_info *pi = netdev_priv(dev);
1688 	struct adapter *adap = netdev2adap(dev);
1689 	unsigned int count = 0, index = 0;
1690 	int ret = 0;
1691 
1692 	switch (info->cmd) {
1693 	case ETHTOOL_GRXFH: {
1694 		unsigned int v = pi->rss_mode;
1695 
1696 		info->data = 0;
1697 		switch (info->flow_type) {
1698 		case TCP_V4_FLOW:
1699 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1700 				info->data = RXH_IP_SRC | RXH_IP_DST |
1701 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1702 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1703 				info->data = RXH_IP_SRC | RXH_IP_DST;
1704 			break;
1705 		case UDP_V4_FLOW:
1706 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1707 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1708 				info->data = RXH_IP_SRC | RXH_IP_DST |
1709 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1710 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1711 				info->data = RXH_IP_SRC | RXH_IP_DST;
1712 			break;
1713 		case SCTP_V4_FLOW:
1714 		case AH_ESP_V4_FLOW:
1715 		case IPV4_FLOW:
1716 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1717 				info->data = RXH_IP_SRC | RXH_IP_DST;
1718 			break;
1719 		case TCP_V6_FLOW:
1720 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1721 				info->data = RXH_IP_SRC | RXH_IP_DST |
1722 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1723 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1724 				info->data = RXH_IP_SRC | RXH_IP_DST;
1725 			break;
1726 		case UDP_V6_FLOW:
1727 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1728 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1729 				info->data = RXH_IP_SRC | RXH_IP_DST |
1730 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1731 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1732 				info->data = RXH_IP_SRC | RXH_IP_DST;
1733 			break;
1734 		case SCTP_V6_FLOW:
1735 		case AH_ESP_V6_FLOW:
1736 		case IPV6_FLOW:
1737 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1738 				info->data = RXH_IP_SRC | RXH_IP_DST;
1739 			break;
1740 		}
1741 		return 0;
1742 	}
1743 	case ETHTOOL_GRXRINGS:
1744 		info->data = pi->nqsets;
1745 		return 0;
1746 	case ETHTOOL_GRXCLSRLCNT:
1747 		info->rule_cnt =
1748 		       adap->ethtool_filters->port[pi->port_id].in_use;
1749 		return 0;
1750 	case ETHTOOL_GRXCLSRULE:
1751 		return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
1752 	case ETHTOOL_GRXCLSRLALL:
1753 		info->data = adap->ethtool_filters->nentries;
1754 		while (count < info->rule_cnt) {
1755 			ret = cxgb4_ntuple_get_filter(dev, info, index);
1756 			if (!ret)
1757 				rules[count++] = index;
1758 			index++;
1759 		}
1760 		return 0;
1761 	}
1762 
1763 	return -EOPNOTSUPP;
1764 }
1765 
1766 static int cxgb4_ntuple_del_filter(struct net_device *dev,
1767 				   struct ethtool_rxnfc *cmd)
1768 {
1769 	struct cxgb4_ethtool_filter_info *filter_info;
1770 	struct adapter *adapter = netdev2adap(dev);
1771 	struct port_info *pi = netdev_priv(dev);
1772 	struct filter_entry *f;
1773 	u32 filter_id;
1774 	int ret;
1775 
1776 	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1777 		return -EAGAIN;  /* can still change nfilters */
1778 
1779 	if (!adapter->ethtool_filters)
1780 		return -EOPNOTSUPP;
1781 
1782 	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1783 		dev_err(adapter->pdev_dev,
1784 			"Location must be < %u",
1785 			adapter->ethtool_filters->nentries);
1786 		return -ERANGE;
1787 	}
1788 
1789 	filter_info = &adapter->ethtool_filters->port[pi->port_id];
1790 
1791 	if (!test_bit(cmd->fs.location, filter_info->bmap))
1792 		return -ENOENT;
1793 
1794 	filter_id = filter_info->loc_array[cmd->fs.location];
1795 	f = cxgb4_get_filter_entry(adapter, filter_id);
1796 
1797 	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
1798 	if (ret)
1799 		goto err;
1800 
1801 	clear_bit(cmd->fs.location, filter_info->bmap);
1802 	filter_info->in_use--;
1803 
1804 err:
1805 	return ret;
1806 }
1807 
1808 /* Add Ethtool n-tuple filters. */
1809 static int cxgb4_ntuple_set_filter(struct net_device *netdev,
1810 				   struct ethtool_rxnfc *cmd)
1811 {
1812 	struct ethtool_rx_flow_spec_input input = {};
1813 	struct cxgb4_ethtool_filter_info *filter_info;
1814 	struct adapter *adapter = netdev2adap(netdev);
1815 	struct port_info *pi = netdev_priv(netdev);
1816 	struct ch_filter_specification fs;
1817 	struct ethtool_rx_flow_rule *flow;
1818 	u32 tid;
1819 	int ret;
1820 
1821 	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1822 		return -EAGAIN;  /* can still change nfilters */
1823 		return -EAGAIN;  /* number of filters can still change */
1824 	if (!adapter->ethtool_filters)
1825 		return -EOPNOTSUPP;
1826 
1827 	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1828 		dev_err(adapter->pdev_dev,
1829 			"Location must be < %u\n",
1830 			adapter->ethtool_filters->nentries);
1831 		return -ERANGE;
1832 	}
1833 
1834 	if (test_bit(cmd->fs.location,
1835 		     adapter->ethtool_filters->port[pi->port_id].bmap))
1836 		return -EEXIST;
1837 
1838 	memset(&fs, 0, sizeof(fs));
1839 
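	/* Convert the legacy ethtool flow spec into a struct flow_rule so the
	 * hardware programming path shared with TC flower can be reused.
	 */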
1840 	input.fs = &cmd->fs;
1841 	flow = ethtool_rx_flow_rule_create(&input);
1842 	if (IS_ERR(flow)) {
1843 		ret = PTR_ERR(flow);
1844 		goto exit;
1845 	}
1846 
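	/* Ask for per-filter hit counters so hardware keeps a match count for
	 * this rule.
	 */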
1847 	fs.hitcnts = 1;
1848 
1849 	ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
1850 				      NULL, &fs, &tid);
1851 	if (ret)
1852 		goto free;
1853 
1854 	filter_info = &adapter->ethtool_filters->port[pi->port_id];
1855 
1856 	filter_info->loc_array[cmd->fs.location] = tid;
1857 	set_bit(cmd->fs.location, filter_info->bmap);
1858 	filter_info->in_use++;
1859 
1860 free:
1861 	ethtool_rx_flow_rule_destroy(flow);
1862 exit:
1863 	return ret;
1864 }
1865 
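/* Only n-tuple rule insertion and deletion are supported here; every other
 * set_rxnfc sub-command falls through to -EOPNOTSUPP.
 */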
1866 static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1867 {
1868 	int ret = -EOPNOTSUPP;
1869 
1870 	switch (cmd->cmd) {
1871 	case ETHTOOL_SRXCLSRLINS:
1872 		ret = cxgb4_ntuple_set_filter(dev, cmd);
1873 		break;
1874 	case ETHTOOL_SRXCLSRLDEL:
1875 		ret = cxgb4_ntuple_del_filter(dev, cmd);
1876 		break;
1877 	default:
1878 		break;
1879 	}
1880 
1881 	return ret;
1882 }
1883 
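/* Debug-dump (cudbg) hooks: set_dump() records which entities to collect and
 * precomputes the buffer size needed, while get_dump_flag()/get_dump_data()
 * report that size and fill the caller's buffer.  Typical usage ("eth0" is a
 * placeholder interface name):
 *
 *   ethtool -W eth0 <flag>          # select what to collect
 *   ethtool -w eth0 data dump.bin   # retrieve the dump
 */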
1884 static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
1885 {
1886 	struct adapter *adapter = netdev2adap(dev);
1887 	u32 len = 0;
1888 
1889 	len = sizeof(struct cudbg_hdr) +
1890 	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1891 	len += cxgb4_get_dump_length(adapter, eth_dump->flag);
1892 
1893 	adapter->eth_dump.flag = eth_dump->flag;
1894 	adapter->eth_dump.len = len;
1895 	return 0;
1896 }
1897 
1898 static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
1899 {
1900 	struct adapter *adapter = netdev2adap(dev);
1901 
1902 	eth_dump->flag = adapter->eth_dump.flag;
1903 	eth_dump->len = adapter->eth_dump.len;
1904 	eth_dump->version = adapter->eth_dump.version;
1905 	return 0;
1906 }
1907 
1908 static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
1909 			 void *buf)
1910 {
1911 	struct adapter *adapter = netdev2adap(dev);
1912 	u32 len = 0;
1913 	int ret = 0;
1914 
1915 	if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
1916 		return -ENOENT;
1917 
1918 	len = sizeof(struct cudbg_hdr) +
1919 	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1920 	len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
1921 	if (eth_dump->len < len)
1922 		return -ENOMEM;
1923 
1924 	ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
1925 	if (ret)
1926 		return ret;
1927 
1928 	eth_dump->flag = adapter->eth_dump.flag;
1929 	eth_dump->len = len;
1930 	eth_dump->version = adapter->eth_dump.version;
1931 	return 0;
1932 }
1933 
1934 static int cxgb4_get_module_info(struct net_device *dev,
1935 				 struct ethtool_modinfo *modinfo)
1936 {
1937 	struct port_info *pi = netdev_priv(dev);
1938 	u8 sff8472_comp, sff_diag_type, sff_rev;
1939 	struct adapter *adapter = pi->adapter;
1940 	int ret;
1941 
1942 	if (!t4_is_inserted_mod_type(pi->mod_type))
1943 		return -EINVAL;
1944 
1945 	switch (pi->port_type) {
1946 	case FW_PORT_TYPE_SFP:
1947 	case FW_PORT_TYPE_QSA:
1948 	case FW_PORT_TYPE_SFP28:
1949 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1950 				I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
1951 				SFF_8472_COMP_LEN, &sff8472_comp);
1952 		if (ret)
1953 			return ret;
1954 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1955 				I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
1956 				SFP_DIAG_TYPE_LEN, &sff_diag_type);
1957 		if (ret)
1958 			return ret;
1959 
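		/* Per SFF-8472, a zero compliance byte means only the 0xa0
		 * page is implemented, and bit 2 of the diagnostic monitoring
		 * type byte ("address change required") means the 0xa2 page
		 * cannot be read directly; in either case expose only the
		 * smaller SFF-8079 map.
		 */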
1960 		if (!sff8472_comp || (sff_diag_type & 4)) {
1961 			modinfo->type = ETH_MODULE_SFF_8079;
1962 			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1963 		} else {
1964 			modinfo->type = ETH_MODULE_SFF_8472;
1965 			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1966 		}
1967 		break;
1968 
1969 	case FW_PORT_TYPE_QSFP:
1970 	case FW_PORT_TYPE_QSFP_10G:
1971 	case FW_PORT_TYPE_CR_QSFP:
1972 	case FW_PORT_TYPE_CR2_QSFP:
1973 	case FW_PORT_TYPE_CR4_QSFP:
1974 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1975 				I2C_DEV_ADDR_A0, SFF_REV_ADDR,
1976 				SFF_REV_LEN, &sff_rev);
1977 		/* For QSFP type ports, a revision compliance value >= 3
1978 		 * means the module is SFF-8636 compliant.
1979 		 */
1980 		if (ret)
1981 			return ret;
1982 		if (sff_rev >= 0x3) {
1983 			modinfo->type = ETH_MODULE_SFF_8636;
1984 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1985 		} else {
1986 			modinfo->type = ETH_MODULE_SFF_8436;
1987 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
1988 		}
1989 		break;
1990 
1991 	default:
1992 		return -EINVAL;
1993 	}
1994 
1995 	return 0;
1996 }
1997 
1998 static int cxgb4_get_module_eeprom(struct net_device *dev,
1999 				   struct ethtool_eeprom *eprom, u8 *data)
2000 {
2001 	int ret = 0, offset = eprom->offset, len = eprom->len;
2002 	struct port_info *pi = netdev_priv(dev);
2003 	struct adapter *adapter = pi->adapter;
2004 
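	/* The module EEPROM is exposed as a flat buffer: the first
	 * I2C_PAGE_SIZE bytes are read from I2C device address 0xa0 and any
	 * remainder from the 0xa2 diagnostics page, matching the length
	 * advertised by cxgb4_get_module_info().  Readable with
	 * "ethtool -m <iface>" (placeholder interface name).
	 */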
2005 	memset(data, 0, eprom->len);
2006 	if (offset + len <= I2C_PAGE_SIZE)
2007 		return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2008 				 I2C_DEV_ADDR_A0, offset, len, data);
2009 
2010 	/* offset + len spans the 0xa0 and 0xa2 pages */
2011 	if (offset <= I2C_PAGE_SIZE) {
2012 		/* read 0xa0 page */
2013 		len = I2C_PAGE_SIZE - offset;
2014 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2015 				 I2C_DEV_ADDR_A0, offset, len, data);
2016 		if (ret)
2017 			return ret;
2018 		offset = I2C_PAGE_SIZE;
2019 		/* Remaining bytes to be read from second page =
2020 		 * Total length - bytes read from first page
2021 		 */
2022 		len = eprom->len - len;
2023 	}
2024 	/* Read additional optical diagnostics from page 0xa2 if supported */
2025 	return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
2026 			 offset, len, &data[eprom->len - len]);
2027 }
2028 
2029 static u32 cxgb4_get_priv_flags(struct net_device *netdev)
2030 {
2031 	struct port_info *pi = netdev_priv(netdev);
2032 	struct adapter *adapter = pi->adapter;
2033 
2034 	return (adapter->eth_flags | pi->eth_flags);
2035 }
2036 
2037 /**
2038  *	set_flags - update the bits selected by @flags in *@cur_flags
2039  *	@cur_flags: pointer to the current flags word
2040  *	@new_flags: new incoming flags
2041  *	@flags: mask of flag bits to copy from @new_flags into *@cur_flags
2042  */
2043 static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
2044 {
2045 	*cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
2046 }
2047 
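/* Private flags are split between adapter-wide bits (PRIV_FLAGS_ADAP) and
 * per-port bits (PRIV_FLAGS_PORT); each mask is applied to its own storage.
 * They are toggled with e.g. "ethtool --set-priv-flags <iface> <flag> on"
 * (names are placeholders).
 */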
2048 static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
2049 {
2050 	struct port_info *pi = netdev_priv(netdev);
2051 	struct adapter *adapter = pi->adapter;
2052 
2053 	set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
2054 	set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
2055 
2056 	return 0;
2057 }
2058 
2059 static const struct ethtool_ops cxgb_ethtool_ops = {
2060 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2061 				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
2062 				     ETHTOOL_COALESCE_TX_USECS_IRQ |
2063 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2064 	.get_link_ksettings = get_link_ksettings,
2065 	.set_link_ksettings = set_link_ksettings,
2066 	.get_fecparam      = get_fecparam,
2067 	.set_fecparam      = set_fecparam,
2068 	.get_drvinfo       = get_drvinfo,
2069 	.get_msglevel      = get_msglevel,
2070 	.set_msglevel      = set_msglevel,
2071 	.get_ringparam     = get_sge_param,
2072 	.set_ringparam     = set_sge_param,
2073 	.get_coalesce      = get_coalesce,
2074 	.set_coalesce      = set_coalesce,
2075 	.get_eeprom_len    = get_eeprom_len,
2076 	.get_eeprom        = get_eeprom,
2077 	.set_eeprom        = set_eeprom,
2078 	.get_pauseparam    = get_pauseparam,
2079 	.set_pauseparam    = set_pauseparam,
2080 	.get_link          = ethtool_op_get_link,
2081 	.get_strings       = get_strings,
2082 	.set_phys_id       = identify_port,
2083 	.nway_reset        = restart_autoneg,
2084 	.get_sset_count    = get_sset_count,
2085 	.get_ethtool_stats = get_stats,
2086 	.get_regs_len      = get_regs_len,
2087 	.get_regs          = get_regs,
2088 	.get_rxnfc         = get_rxnfc,
2089 	.set_rxnfc         = set_rxnfc,
2090 	.get_rxfh_indir_size = get_rss_table_size,
2091 	.get_rxfh	   = get_rss_table,
2092 	.set_rxfh	   = set_rss_table,
2093 	.flash_device      = set_flash,
2094 	.get_ts_info       = get_ts_info,
2095 	.set_dump          = set_dump,
2096 	.get_dump_flag     = get_dump_flag,
2097 	.get_dump_data     = get_dump_data,
2098 	.get_module_info   = cxgb4_get_module_info,
2099 	.get_module_eeprom = cxgb4_get_module_eeprom,
2100 	.get_priv_flags    = cxgb4_get_priv_flags,
2101 	.set_priv_flags    = cxgb4_set_priv_flags,
2102 };
2103 
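/* Free the bookkeeping allocated by cxgb4_init_ethtool_filters(): the
 * per-port location arrays and bitmaps first, then the port array, then the
 * top-level container.
 */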
2104 void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
2105 {
2106 	struct cxgb4_ethtool_filter_info *eth_filter_info;
2107 	u8 i;
2108 
2109 	if (!adap->ethtool_filters)
2110 		return;
2111 
2112 	eth_filter_info = adap->ethtool_filters->port;
2113 
2114 	if (eth_filter_info) {
2115 		for (i = 0; i < adap->params.nports; i++) {
2116 			kvfree(eth_filter_info[i].loc_array);
2117 			kfree(eth_filter_info[i].bmap);
2118 		}
2119 		kfree(eth_filter_info);
2120 	}
2121 
2122 	kfree(adap->ethtool_filters);
2123 }
2124 
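/* Allocate the bookkeeping used by the ethtool n-tuple filter interface: for
 * each port, an array of u32 hardware TIDs indexed by ethtool rule location
 * and a bitmap tracking which locations are in use.
 */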
2125 int cxgb4_init_ethtool_filters(struct adapter *adap)
2126 {
2127 	struct cxgb4_ethtool_filter_info *eth_filter_info;
2128 	struct cxgb4_ethtool_filter *eth_filter;
2129 	struct tid_info *tids = &adap->tids;
2130 	u32 nentries, i;
2131 	int ret;
2132 
2133 	eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
2134 	if (!eth_filter)
2135 		return -ENOMEM;
2136 
2137 	eth_filter_info = kcalloc(adap->params.nports,
2138 				  sizeof(*eth_filter_info),
2139 				  GFP_KERNEL);
2140 	if (!eth_filter_info) {
2141 		ret = -ENOMEM;
2142 		goto free_eth_filter;
2143 	}
2144 
2145 	eth_filter->port = eth_filter_info;
2146 
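	/* Size the location space to cover the high-priority and normal
	 * filter regions; with hash filters enabled, rules may also land in
	 * the hash region and in the TID range between tid_base and
	 * stid_base, so count those as well.
	 */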
2147 	nentries = tids->nhpftids + tids->nftids;
2148 	if (is_hashfilter(adap))
2149 		nentries += tids->nhash +
2150 			    (adap->tids.stid_base - adap->tids.tid_base);
2151 	eth_filter->nentries = nentries;
2152 
2153 	for (i = 0; i < adap->params.nports; i++) {
2154 		eth_filter->port[i].loc_array = kvcalloc(nentries, sizeof(u32), GFP_KERNEL);
2155 		if (!eth_filter->port[i].loc_array) {
2156 			ret = -ENOMEM;
2157 			goto free_eth_finfo;
2158 		}
2159 
2160 		eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
2161 						   sizeof(unsigned long),
2162 						   GFP_KERNEL);
2163 		if (!eth_filter->port[i].bmap) {
			kvfree(eth_filter->port[i].loc_array);
2164 			ret = -ENOMEM;
2165 			goto free_eth_finfo;
2166 		}
2167 	}
2168 
2169 	adap->ethtool_filters = eth_filter;
2170 	return 0;
2171 
2172 free_eth_finfo:
2173 	while (i-- > 0) {
2174 		kfree(eth_filter->port[i].bmap);
2175 		kvfree(eth_filter->port[i].loc_array);
2176 	}
2177 	kfree(eth_filter_info);
2178 
2179 free_eth_filter:
2180 	kfree(eth_filter);
2181 
2182 	return ret;
2183 }
2184 
2185 void cxgb4_set_ethtool_ops(struct net_device *netdev)
2186 {
2187 	netdev->ethtool_ops = &cxgb_ethtool_ops;
2188 }
2189