1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <linux/ethtool.h>
21 
22 struct be_ethtool_stat {
23 	char desc[ETH_GSTRING_LEN];
24 	int type;
25 	int size;
26 	int offset;
27 };
28 
29 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
30 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 					offsetof(_struct, field)
32 #define DRVSTAT_TX_INFO(field)	#field, DRVSTAT_TX,\
33 					FIELDINFO(struct be_tx_stats, field)
34 #define DRVSTAT_RX_INFO(field)	#field, DRVSTAT_RX,\
35 					FIELDINFO(struct be_rx_stats, field)
36 #define	DRVSTAT_INFO(field)	#field, DRVSTAT,\
37 					FIELDINFO(struct be_drv_stats, field)
38 
39 static const struct be_ethtool_stat et_stats[] = {
40 	{DRVSTAT_INFO(rx_crc_errors)},
41 	{DRVSTAT_INFO(rx_alignment_symbol_errors)},
42 	{DRVSTAT_INFO(rx_pause_frames)},
43 	{DRVSTAT_INFO(rx_control_frames)},
44 	/* Received packets dropped when the Ethernet length field
45 	 * is not equal to the actual Ethernet data length.
46 	 */
47 	{DRVSTAT_INFO(rx_in_range_errors)},
48 	/* Received packets dropped when their length field is >= 1501 bytes
49 	 * and <= 1535 bytes.
50 	 */
51 	{DRVSTAT_INFO(rx_out_range_errors)},
52 	/* Received packets dropped when they are longer than 9216 bytes */
53 	{DRVSTAT_INFO(rx_frame_too_long)},
54 	/* Received packets dropped when they don't pass the unicast or
55 	 * multicast address filtering.
56 	 */
57 	{DRVSTAT_INFO(rx_address_mismatch_drops)},
58 	/* Received packets dropped when IP packet length field is less than
59 	 * the IP header length field.
60 	 */
61 	{DRVSTAT_INFO(rx_dropped_too_small)},
62 	/* Received packets dropped when IP length field is greater than
63 	 * the actual packet length.
64 	 */
65 	{DRVSTAT_INFO(rx_dropped_too_short)},
66 	/* Received packets dropped when the IP header length field is less
67 	 * than 5.
68 	 */
69 	{DRVSTAT_INFO(rx_dropped_header_too_small)},
70 	/* Received packets dropped when the TCP header length field is less
71 	 * than 5 or the TCP header length + IP header length is more
72 	 * than IP packet length.
73 	 */
74 	{DRVSTAT_INFO(rx_dropped_tcp_length)},
75 	{DRVSTAT_INFO(rx_dropped_runt)},
76 	/* Number of received packets dropped when a fifo for descriptors going
77 	 * into the packet demux block overflows. In normal operation, this
78 	 * fifo must never overflow.
79 	 */
80 	{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
81 	{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
82 	{DRVSTAT_INFO(rx_ip_checksum_errs)},
83 	{DRVSTAT_INFO(rx_tcp_checksum_errs)},
84 	{DRVSTAT_INFO(rx_udp_checksum_errs)},
85 	{DRVSTAT_INFO(tx_pauseframes)},
86 	{DRVSTAT_INFO(tx_controlframes)},
87 	{DRVSTAT_INFO(rx_priority_pause_frames)},
88 	/* Received packets dropped when an internal fifo going into
89 	 * main packet buffer tank (PMEM) overflows.
90 	 */
91 	{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
92 	{DRVSTAT_INFO(jabber_events)},
93 	/* Received packets dropped due to lack of available HW packet buffers
94 	 * used to temporarily hold the received packets.
95 	 */
96 	{DRVSTAT_INFO(rx_drops_no_pbuf)},
97 	/* Received packets dropped due to input receive buffer
98 	 * descriptor fifo overflowing.
99 	 */
100 	{DRVSTAT_INFO(rx_drops_no_erx_descr)},
101 	/* Packets dropped because the internal FIFO to the offloaded TCP
102 	 * receive processing block is full. This could happen only for
103 	 * offloaded iSCSI or FCoE traffic.
104 	 */
105 	{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
106 	/* Received packets dropped when they need more than 8
107 	 * receive buffers. This cannot happen as the driver configures
108 	 * 2048 byte receive buffers.
109 	 */
110 	{DRVSTAT_INFO(rx_drops_too_many_frags)},
111 	{DRVSTAT_INFO(forwarded_packets)},
112 	/* Received packets dropped when the frame length
113 	 * is more than 9018 bytes
114 	 */
115 	{DRVSTAT_INFO(rx_drops_mtu)},
116 	/* Number of packets dropped due to random early drop function */
117 	{DRVSTAT_INFO(eth_red_drops)},
118 	{DRVSTAT_INFO(be_on_die_temperature)}
119 };
120 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
121 
122 /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
123  * are first and second members respectively.
124  */
125 static const struct be_ethtool_stat et_rx_stats[] = {
126 	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
127 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
128 	{DRVSTAT_RX_INFO(rx_compl)},
129 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
130 	/* Number of page allocation failures while posting receive buffers
131 	 * to HW.
132 	 */
133 	{DRVSTAT_RX_INFO(rx_post_fail)},
134 	/* Received packets dropped due to skb allocation failure */
135 	{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
136 	/* Received packets dropped due to lack of available fetched buffers
137 	 * posted by the driver.
138 	 */
139 	{DRVSTAT_RX_INFO(rx_drops_no_frags)}
140 };
141 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
142 
143 /* Stats related to multi TX queues: get_stats routine assumes compl is the
144  * first member.
145  */
146 static const struct be_ethtool_stat et_tx_stats[] = {
147 	{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
148 	{DRVSTAT_TX_INFO(tx_bytes)},
149 	{DRVSTAT_TX_INFO(tx_pkts)},
150 	/* Number of skbs queued for transmission by the driver */
151 	{DRVSTAT_TX_INFO(tx_reqs)},
152 	/* Number of TX work request blocks DMAed to HW */
153 	{DRVSTAT_TX_INFO(tx_wrbs)},
154 	/* Number of times the TX queue was stopped due to lack
155 	 * of space in the TXQ.
156 	 */
157 	{DRVSTAT_TX_INFO(tx_stops)}
158 };
159 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
160 
161 static const char et_self_tests[][ETH_GSTRING_LEN] = {
162 	"MAC Loopback test",
163 	"PHY Loopback test",
164 	"External Loopback test",
165 	"DDR DMA test",
166 	"Link test"
167 };
168 
169 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
170 #define BE_MAC_LOOPBACK 0x0
171 #define BE_PHY_LOOPBACK 0x1
172 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
173 #define BE_NO_LOOPBACK 0xff
174 
175 static void be_get_drvinfo(struct net_device *netdev,
176 				struct ethtool_drvinfo *drvinfo)
177 {
178 	struct be_adapter *adapter = netdev_priv(netdev);
179 	char fw_on_flash[FW_VER_LEN];
180 
181 	memset(fw_on_flash, 0, sizeof(fw_on_flash));
182 	be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
183 
184 	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
185 	strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
186 	strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
187 	if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
188 		strcat(drvinfo->fw_version, " [");
189 		strcat(drvinfo->fw_version, fw_on_flash);
190 		strcat(drvinfo->fw_version, "]");
191 	}
192 
193 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
194 		sizeof(drvinfo->bus_info));
195 	drvinfo->testinfo_len = 0;
196 	drvinfo->regdump_len = 0;
197 	drvinfo->eedump_len = 0;
198 }
199 
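/* Return the length of a file-based object (e.g. the FW dump file) on a
 * Lancer chip. A READ_OBJECT cmd with offset and size of 0 reports only
 * the object's length.
 */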
200 static u32
201 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
202 {
203 	u32 data_read = 0, eof;
204 	u8 addn_status;
205 	struct be_dma_mem data_len_cmd;
206 	int status;
207 
208 	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
209 	/* data_offset and data_size should be 0 to get reg len */
210 	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
211 				file_name, &data_read, &eof, &addn_status);
212 
213 	return data_read;
214 }
215 
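/* Read a file-based object from a Lancer chip into 'buf', fetching up to
 * LANCER_READ_FILE_CHUNK bytes per READ_OBJECT cmd until 'buf_len' bytes
 * have been copied or the chip reports EOF.
 */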
216 static int
217 lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
218 		u32 buf_len, void *buf)
219 {
220 	struct be_dma_mem read_cmd;
221 	u32 read_len = 0, total_read_len = 0, chunk_size;
222 	u32 eof = 0;
223 	u8 addn_status;
224 	int status = 0;
225 
226 	read_cmd.size = LANCER_READ_FILE_CHUNK;
227 	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
228 			&read_cmd.dma);
229 
230 	if (!read_cmd.va) {
231 		dev_err(&adapter->pdev->dev,
232 				"Memory allocation failure while reading dump\n");
233 		return -ENOMEM;
234 	}
235 
236 	while ((total_read_len < buf_len) && !eof) {
237 		chunk_size = min_t(u32, (buf_len - total_read_len),
238 				LANCER_READ_FILE_CHUNK);
239 		chunk_size = ALIGN(chunk_size, 4);
240 		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
241 				total_read_len, file_name, &read_len,
242 				&eof, &addn_status);
243 		if (!status) {
244 			memcpy(buf + total_read_len, read_cmd.va, read_len);
245 			total_read_len += read_len;
246 			eof &= LANCER_READ_FILE_EOF_MASK;
247 		} else {
248 			status = -EIO;
249 			break;
250 		}
251 	}
252 	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
253 			read_cmd.dma);
254 
255 	return status;
256 }
257 
258 static int
259 be_get_reg_len(struct net_device *netdev)
260 {
261 	struct be_adapter *adapter = netdev_priv(netdev);
262 	u32 log_size = 0;
263 
264 	if (!check_privilege(adapter, MAX_PRIVILEGES))
265 		return 0;
266 
267 	if (be_physfn(adapter)) {
268 		if (lancer_chip(adapter))
269 			log_size = lancer_cmd_get_file_len(adapter,
270 					LANCER_FW_DUMP_FILE);
271 		else
272 			be_cmd_get_reg_len(adapter, &log_size);
273 	}
274 	return log_size;
275 }
276 
277 static void
278 be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
279 {
280 	struct be_adapter *adapter = netdev_priv(netdev);
281 
282 	if (be_physfn(adapter)) {
283 		memset(buf, 0, regs->len);
284 		if (lancer_chip(adapter))
285 			lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
286 					regs->len, buf);
287 		else
288 			be_cmd_get_regs(adapter, regs->len, buf);
289 	}
290 }
291 
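/* The same EQ-delay (eqd) values are applied to all event queues by
 * be_set_coalesce(), so eq_obj[0] is reported for both RX and TX here.
 */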
292 static int be_get_coalesce(struct net_device *netdev,
293 			   struct ethtool_coalesce *et)
294 {
295 	struct be_adapter *adapter = netdev_priv(netdev);
296 	struct be_eq_obj *eqo = &adapter->eq_obj[0];
297 
299 	et->rx_coalesce_usecs = eqo->cur_eqd;
300 	et->rx_coalesce_usecs_high = eqo->max_eqd;
301 	et->rx_coalesce_usecs_low = eqo->min_eqd;
302 
303 	et->tx_coalesce_usecs = eqo->cur_eqd;
304 	et->tx_coalesce_usecs_high = eqo->max_eqd;
305 	et->tx_coalesce_usecs_low = eqo->min_eqd;
306 
307 	et->use_adaptive_rx_coalesce = eqo->enable_aic;
308 	et->use_adaptive_tx_coalesce = eqo->enable_aic;
309 
310 	return 0;
311 }
312 
313 /* TX attributes are ignored. Only RX attributes are considered.
314  * The eqd cmd is issued in the worker thread.
315  */
316 static int be_set_coalesce(struct net_device *netdev,
317 			   struct ethtool_coalesce *et)
318 {
319 	struct be_adapter *adapter = netdev_priv(netdev);
320 	struct be_eq_obj *eqo;
321 	int i;
322 
323 	for_all_evt_queues(adapter, eqo, i) {
324 		eqo->enable_aic = et->use_adaptive_rx_coalesce;
325 		eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
326 		eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
327 		eqo->eqd = et->rx_coalesce_usecs;
328 	}
329 
330 	return 0;
331 }
332 
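/* Fill 'data' with the port-wide driver stats followed by the per-RX-queue
 * and per-TX-queue stats. The u64_stats sync points are used to fetch
 * consistent snapshots of the 64-bit byte/packet counters.
 */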
333 static void
334 be_get_ethtool_stats(struct net_device *netdev,
335 		struct ethtool_stats *stats, uint64_t *data)
336 {
337 	struct be_adapter *adapter = netdev_priv(netdev);
338 	struct be_rx_obj *rxo;
339 	struct be_tx_obj *txo;
340 	void *p;
341 	unsigned int i, j, base = 0, start;
342 
343 	for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
344 		p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
345 		data[i] = *(u32 *)p;
346 	}
347 	base += ETHTOOL_STATS_NUM;
348 
349 	for_all_rx_queues(adapter, rxo, j) {
350 		struct be_rx_stats *stats = rx_stats(rxo);
351 
352 		do {
353 			start = u64_stats_fetch_begin_bh(&stats->sync);
354 			data[base] = stats->rx_bytes;
355 			data[base + 1] = stats->rx_pkts;
356 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
357 
358 		for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
359 			p = (u8 *)stats + et_rx_stats[i].offset;
360 			data[base + i] = *(u32 *)p;
361 		}
362 		base += ETHTOOL_RXSTATS_NUM;
363 	}
364 
365 	for_all_tx_queues(adapter, txo, j) {
366 		struct be_tx_stats *stats = tx_stats(txo);
367 
368 		do {
369 			start = u64_stats_fetch_begin_bh(&stats->sync_compl);
370 			data[base] = stats->tx_compl;
371 		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
372 
373 		do {
374 			start = u64_stats_fetch_begin_bh(&stats->sync);
375 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
376 				p = (u8 *)stats + et_tx_stats[i].offset;
377 				data[base + i] =
378 					(et_tx_stats[i].size == sizeof(u64)) ?
379 						*(u64 *)p : *(u32 *)p;
380 			}
381 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
382 		base += ETHTOOL_TXSTATS_NUM;
383 	}
384 }
385 
386 static void
387 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
388 		uint8_t *data)
389 {
390 	struct be_adapter *adapter = netdev_priv(netdev);
391 	int i, j;
392 
393 	switch (stringset) {
394 	case ETH_SS_STATS:
395 		for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
396 			memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
397 			data += ETH_GSTRING_LEN;
398 		}
399 		for (i = 0; i < adapter->num_rx_qs; i++) {
400 			for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
401 				sprintf(data, "rxq%d: %s", i,
402 					et_rx_stats[j].desc);
403 				data += ETH_GSTRING_LEN;
404 			}
405 		}
406 		for (i = 0; i < adapter->num_tx_qs; i++) {
407 			for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
408 				sprintf(data, "txq%d: %s", i,
409 					et_tx_stats[j].desc);
410 				data += ETH_GSTRING_LEN;
411 			}
412 		}
413 		break;
414 	case ETH_SS_TEST:
415 		for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
416 			memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
417 			data += ETH_GSTRING_LEN;
418 		}
419 		break;
420 	}
421 }
422 
423 static int be_get_sset_count(struct net_device *netdev, int stringset)
424 {
425 	struct be_adapter *adapter = netdev_priv(netdev);
426 
427 	switch (stringset) {
428 	case ETH_SS_TEST:
429 		return ETHTOOL_TESTS_NUM;
430 	case ETH_SS_STATS:
431 		return ETHTOOL_STATS_NUM +
432 			adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
433 			adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
434 	default:
435 		return -EINVAL;
436 	}
437 }
438 
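/* Map the FW-reported PHY interface type to an ethtool PORT_* value. An
 * SFP+ port with a non-zero DAC cable length is reported as direct-attach
 * copper (PORT_DA); otherwise it is reported as fibre.
 */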
439 static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
440 {
441 	u32 port;
442 
443 	switch (phy_type) {
444 	case PHY_TYPE_BASET_1GB:
445 	case PHY_TYPE_BASEX_1GB:
446 	case PHY_TYPE_SGMII:
447 		port = PORT_TP;
448 		break;
449 	case PHY_TYPE_SFP_PLUS_10GB:
450 		port = dac_cable_len ? PORT_DA : PORT_FIBRE;
451 		break;
452 	case PHY_TYPE_XFP_10GB:
453 	case PHY_TYPE_SFP_1GB:
454 		port = PORT_FIBRE;
455 		break;
456 	case PHY_TYPE_BASET_10GB:
457 		port = PORT_TP;
458 		break;
459 	default:
460 		port = PORT_OTHER;
461 	}
462 
463 	return port;
464 }
465 
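/* Convert the FW-reported PHY interface type and supported-speed bitmap
 * into the corresponding ethtool SUPPORTED_* flags.
 */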
466 static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
467 {
468 	u32 val = 0;
469 
470 	switch (if_type) {
471 	case PHY_TYPE_BASET_1GB:
472 	case PHY_TYPE_BASEX_1GB:
473 	case PHY_TYPE_SGMII:
474 		val |= SUPPORTED_TP;
475 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
476 			val |= SUPPORTED_1000baseT_Full;
477 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
478 			val |= SUPPORTED_100baseT_Full;
479 		if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
480 			val |= SUPPORTED_10baseT_Full;
481 		break;
482 	case PHY_TYPE_KX4_10GB:
483 		val |= SUPPORTED_Backplane;
484 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
485 			val |= SUPPORTED_1000baseKX_Full;
486 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
487 			val |= SUPPORTED_10000baseKX4_Full;
488 		break;
489 	case PHY_TYPE_KR_10GB:
490 		val |= SUPPORTED_Backplane |
491 				SUPPORTED_10000baseKR_Full;
492 		break;
493 	case PHY_TYPE_SFP_PLUS_10GB:
494 	case PHY_TYPE_XFP_10GB:
495 	case PHY_TYPE_SFP_1GB:
496 		val |= SUPPORTED_FIBRE;
497 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
498 			val |= SUPPORTED_10000baseT_Full;
499 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
500 			val |= SUPPORTED_1000baseT_Full;
501 		break;
502 	case PHY_TYPE_BASET_10GB:
503 		val |= SUPPORTED_TP;
504 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
505 			val |= SUPPORTED_10000baseT_Full;
506 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
507 			val |= SUPPORTED_1000baseT_Full;
508 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
509 			val |= SUPPORTED_100baseT_Full;
510 		break;
511 	default:
512 		val |= SUPPORTED_TP;
513 	}
514 
515 	return val;
516 }
517 
518 bool be_pause_supported(struct be_adapter *adapter)
519 {
520 	return adapter->phy.interface_type != PHY_TYPE_SFP_PLUS_10GB &&
521 	       adapter->phy.interface_type != PHY_TYPE_XFP_10GB;
523 }
524 
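/* PHY attributes are queried from FW only when no cached value is available
 * (adapter->phy.link_speed < 0); otherwise the values saved in adapter->phy
 * by a previous call are reported.
 */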
525 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
526 {
527 	struct be_adapter *adapter = netdev_priv(netdev);
528 	u8 link_status;
529 	u16 link_speed = 0;
530 	int status;
531 	u32 auto_speeds;
532 	u32 fixed_speeds;
533 	u32 dac_cable_len;
534 	u16 interface_type;
535 
536 	if (adapter->phy.link_speed < 0) {
537 		status = be_cmd_link_status_query(adapter, &link_speed,
538 						  &link_status, 0);
539 		if (!status)
540 			be_link_status_update(adapter, link_status);
541 		ethtool_cmd_speed_set(ecmd, link_speed);
542 
543 		status = be_cmd_get_phy_info(adapter);
544 		if (!status) {
545 			interface_type = adapter->phy.interface_type;
546 			auto_speeds = adapter->phy.auto_speeds_supported;
547 			fixed_speeds = adapter->phy.fixed_speeds_supported;
548 			dac_cable_len = adapter->phy.dac_cable_len;
549 
550 			ecmd->supported =
551 				convert_to_et_setting(interface_type,
552 						      auto_speeds |
553 						      fixed_speeds);
554 			ecmd->advertising =
555 				convert_to_et_setting(interface_type,
556 						      auto_speeds);
557 
558 			ecmd->port = be_get_port_type(interface_type,
559 						      dac_cable_len);
560 
561 			if (adapter->phy.auto_speeds_supported) {
562 				ecmd->supported |= SUPPORTED_Autoneg;
563 				ecmd->autoneg = AUTONEG_ENABLE;
564 				ecmd->advertising |= ADVERTISED_Autoneg;
565 			}
566 
567 			ecmd->supported |= SUPPORTED_Pause;
568 			if (be_pause_supported(adapter))
569 				ecmd->advertising |= ADVERTISED_Pause;
570 
571 			switch (adapter->phy.interface_type) {
572 			case PHY_TYPE_KR_10GB:
573 			case PHY_TYPE_KX4_10GB:
574 				ecmd->transceiver = XCVR_INTERNAL;
575 				break;
576 			default:
577 				ecmd->transceiver = XCVR_EXTERNAL;
578 				break;
579 			}
580 		} else {
581 			ecmd->port = PORT_OTHER;
582 			ecmd->autoneg = AUTONEG_DISABLE;
583 			ecmd->transceiver = XCVR_DUMMY1;
584 		}
585 
586 		/* Save for future use */
587 		adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
588 		adapter->phy.port_type = ecmd->port;
589 		adapter->phy.transceiver = ecmd->transceiver;
590 		adapter->phy.autoneg = ecmd->autoneg;
591 		adapter->phy.advertising = ecmd->advertising;
592 		adapter->phy.supported = ecmd->supported;
593 	} else {
594 		ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
595 		ecmd->port = adapter->phy.port_type;
596 		ecmd->transceiver = adapter->phy.transceiver;
597 		ecmd->autoneg = adapter->phy.autoneg;
598 		ecmd->advertising = adapter->phy.advertising;
599 		ecmd->supported = adapter->phy.supported;
600 	}
601 
602 	ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
603 	ecmd->phy_address = adapter->port_num;
604 
605 	return 0;
606 }
607 
608 static void be_get_ringparam(struct net_device *netdev,
609 			     struct ethtool_ringparam *ring)
610 {
611 	struct be_adapter *adapter = netdev_priv(netdev);
612 
613 	ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
614 	ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
615 }
616 
617 static void
618 be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
619 {
620 	struct be_adapter *adapter = netdev_priv(netdev);
621 
622 	be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
623 	ecmd->autoneg = adapter->phy.fc_autoneg;
624 }
625 
626 static int
627 be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
628 {
629 	struct be_adapter *adapter = netdev_priv(netdev);
630 	int status;
631 
632 	if (ecmd->autoneg != adapter->phy.fc_autoneg)
633 		return -EINVAL;
634 	adapter->tx_fc = ecmd->tx_pause;
635 	adapter->rx_fc = ecmd->rx_pause;
636 
637 	status = be_cmd_set_flow_control(adapter,
638 					adapter->tx_fc, adapter->rx_fc);
639 	if (status)
640 		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
641 
642 	return status;
643 }
644 
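/* Blink the port beacon (LED) to identify the adapter. The current beacon
 * state is saved on ETHTOOL_ID_ACTIVE and restored on ETHTOOL_ID_INACTIVE.
 */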
645 static int
646 be_set_phys_id(struct net_device *netdev,
647 	       enum ethtool_phys_id_state state)
648 {
649 	struct be_adapter *adapter = netdev_priv(netdev);
650 
651 	switch (state) {
652 	case ETHTOOL_ID_ACTIVE:
653 		be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
654 					&adapter->beacon_state);
655 		return 1;	/* cycle on/off once per second */
656 
657 	case ETHTOOL_ID_ON:
658 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
659 					BEACON_STATE_ENABLED);
660 		break;
661 
662 	case ETHTOOL_ID_OFF:
663 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
664 					BEACON_STATE_DISABLED);
665 		break;
666 
667 	case ETHTOOL_ID_INACTIVE:
668 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
669 					adapter->beacon_state);
670 	}
671 
672 	return 0;
673 }
674 
676 static void
677 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
678 {
679 	struct be_adapter *adapter = netdev_priv(netdev);
680 
681 	if (be_is_wol_supported(adapter)) {
682 		wol->supported |= WAKE_MAGIC;
683 		wol->wolopts |= WAKE_MAGIC;
684 	} else {
685 		wol->wolopts = 0;
	}
686 	memset(&wol->sopass, 0, sizeof(wol->sopass));
687 }
688 
689 static int
690 be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
691 {
692 	struct be_adapter *adapter = netdev_priv(netdev);
693 
694 	if (wol->wolopts & ~WAKE_MAGIC)
695 		return -EOPNOTSUPP;
696 
697 	if (!be_is_wol_supported(adapter)) {
698 		dev_warn(&adapter->pdev->dev, "WOL not supported\n");
699 		return -EOPNOTSUPP;
700 	}
701 
702 	if (wol->wolopts & WAKE_MAGIC)
703 		adapter->wol = true;
704 	else
705 		adapter->wol = false;
706 
707 	return 0;
708 }
709 
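/* DMA two alternating 64-bit test patterns through the adapter's DDR to
 * exercise the DMA path; returns non-zero if either pattern test fails.
 */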
710 static int
711 be_test_ddr_dma(struct be_adapter *adapter)
712 {
713 	int ret, i;
714 	struct be_dma_mem ddrdma_cmd;
715 	static const u64 pattern[2] = {
716 		0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
717 	};
718 
719 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
720 	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
721 					   &ddrdma_cmd.dma, GFP_KERNEL);
722 	if (!ddrdma_cmd.va) {
723 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
724 		return -ENOMEM;
725 	}
726 
727 	for (i = 0; i < 2; i++) {
728 		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
729 					4096, &ddrdma_cmd);
730 		if (ret != 0)
731 			goto err;
732 	}
733 
734 err:
735 	dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
736 			  ddrdma_cmd.dma);
737 	return ret;
738 }
739 
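/* Run a FW-driven loopback test of the given type: enable the loopback mode
 * on the port, run the test and then restore the port to BE_NO_LOOPBACK.
 */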
740 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
741 				u64 *status)
742 {
743 	be_cmd_set_loopback(adapter, adapter->hba_port_num,
744 				loopback_type, 1);
745 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
746 				loopback_type, 1500,
747 				2, 0xabc);
748 	be_cmd_set_loopback(adapter, adapter->hba_port_num,
749 				BE_NO_LOOPBACK, 1);
750 	return *status;
751 }
752 
753 static void
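/* ethtool self-test. The result indices match et_self_tests[]: data[0]-[2]
 * hold the MAC, PHY and external loopback results (offline only), data[3]
 * the DDR DMA test result (not run on Lancer) and data[4] the link test.
 */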
754 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
755 {
756 	struct be_adapter *adapter = netdev_priv(netdev);
757 	int status;
758 	u8 link_status = 0;
759 
760 	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
761 
762 	if (test->flags & ETH_TEST_FL_OFFLINE) {
763 		if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
764 						&data[0]) != 0) {
765 			test->flags |= ETH_TEST_FL_FAILED;
766 		}
767 		if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
768 						&data[1]) != 0) {
769 			test->flags |= ETH_TEST_FL_FAILED;
770 		}
771 		if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
772 						&data[2]) != 0) {
773 			test->flags |= ETH_TEST_FL_FAILED;
774 		}
775 	}
776 
777 	if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
778 		data[3] = 1;
779 		test->flags |= ETH_TEST_FL_FAILED;
780 	}
781 
782 	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
783 	if (status) {
784 		test->flags |= ETH_TEST_FL_FAILED;
785 		data[4] = -1;
786 	} else if (!link_status) {
787 		test->flags |= ETH_TEST_FL_FAILED;
788 		data[4] = 1;
789 	}
790 }
791 
792 static int
793 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
794 {
795 	struct be_adapter *adapter = netdev_priv(netdev);
796 
797 	return be_load_fw(adapter, efl->data);
798 }
799 
800 static int
801 be_get_eeprom_len(struct net_device *netdev)
802 {
803 	struct be_adapter *adapter = netdev_priv(netdev);
804 
805 	if (!check_privilege(adapter, MAX_PRIVILEGES))
806 		return 0;
807 
808 	if (lancer_chip(adapter)) {
809 		if (be_physfn(adapter))
810 			return lancer_cmd_get_file_len(adapter,
811 					LANCER_VPD_PF_FILE);
812 		else
813 			return lancer_cmd_get_file_len(adapter,
814 					LANCER_VPD_VF_FILE);
815 	} else {
816 		return BE_READ_SEEPROM_LEN;
817 	}
818 }
819 
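/* On Lancer the VPD is read as a file-based object (a separate file for PF
 * and VF); on other chips it is read from the SEEPROM via FW cmd.
 */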
820 static int
821 be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
822 			uint8_t *data)
823 {
824 	struct be_adapter *adapter = netdev_priv(netdev);
825 	struct be_dma_mem eeprom_cmd;
826 	struct be_cmd_resp_seeprom_read *resp;
827 	int status;
828 
829 	if (!eeprom->len)
830 		return -EINVAL;
831 
832 	if (lancer_chip(adapter)) {
833 		if (be_physfn(adapter))
834 			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
835 					eeprom->len, data);
836 		else
837 			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
838 					eeprom->len, data);
839 	}
840 
841 	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);
842 
843 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
844 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
845 	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
846 					   &eeprom_cmd.dma, GFP_KERNEL);
847 
848 	if (!eeprom_cmd.va) {
849 		dev_err(&adapter->pdev->dev,
850 			"Memory allocation failure. Could not read eeprom\n");
851 		return -ENOMEM;
852 	}
853 
854 	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
855 
856 	if (!status) {
857 		resp = eeprom_cmd.va;
858 		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
859 	}
860 	dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
861 			  eeprom_cmd.dma);
862 
863 	return status;
864 }
865 
866 static u32 be_get_msg_level(struct net_device *netdev)
867 {
868 	struct be_adapter *adapter = netdev_priv(netdev);
869 
870 	if (lancer_chip(adapter)) {
871 		dev_err(&adapter->pdev->dev, "Operation not supported\n");
872 		return -EOPNOTSUPP;
873 	}
874 
875 	return adapter->msg_enable;
876 }
877 
878 static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
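/* Set the FW's UART trace level: read the extended FAT capabilities, update
 * the debug level of every module's MODE_UART entry and write them back.
 */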
879 {
880 	struct be_dma_mem extfat_cmd;
881 	struct be_fat_conf_params *cfgs;
882 	int status;
883 	int i, j;
884 
885 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
886 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
887 	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
888 					     &extfat_cmd.dma);
889 	if (!extfat_cmd.va) {
890 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
891 			__func__);
892 		goto err;
893 	}
894 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
895 	if (!status) {
896 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
897 					sizeof(struct be_cmd_resp_hdr));
898 		for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
899 			u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
900 			for (j = 0; j < num_modes; j++) {
901 				if (cfgs->module[i].trace_lvl[j].mode ==
902 								MODE_UART)
903 					cfgs->module[i].trace_lvl[j].dbg_lvl =
904 							cpu_to_le32(level);
905 			}
906 		}
907 		status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
908 							cfgs);
909 		if (status)
910 			dev_err(&adapter->pdev->dev,
911 				"Message level set failed\n");
912 	} else {
913 		dev_err(&adapter->pdev->dev, "Message level get failed\n");
914 	}
915 
916 	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
917 			    extfat_cmd.dma);
918 err:
919 	return;
920 }
921 
922 static void be_set_msg_level(struct net_device *netdev, u32 level)
923 {
924 	struct be_adapter *adapter = netdev_priv(netdev);
925 
926 	if (lancer_chip(adapter)) {
927 		dev_err(&adapter->pdev->dev, "Operation not supported\n");
928 		return;
929 	}
930 
931 	if (adapter->msg_enable == level)
932 		return;
933 
934 	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
935 		be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
936 				    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
937 	adapter->msg_enable = level;
938 
939 	return;
940 }
941 
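/* For reference, common ethtool invocations map to these ops roughly as:
 * `ethtool -i` -> get_drvinfo, `ethtool -S` -> get_ethtool_stats,
 * `ethtool -t` -> self_test, `ethtool -f` -> flash_device,
 * `ethtool -p` -> set_phys_id, `ethtool -d` -> get_regs,
 * `ethtool -e` -> get_eeprom, `ethtool -a/-A` -> get/set_pauseparam.
 */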
942 const struct ethtool_ops be_ethtool_ops = {
943 	.get_settings = be_get_settings,
944 	.get_drvinfo = be_get_drvinfo,
945 	.get_wol = be_get_wol,
946 	.set_wol = be_set_wol,
947 	.get_link = ethtool_op_get_link,
948 	.get_eeprom_len = be_get_eeprom_len,
949 	.get_eeprom = be_read_eeprom,
950 	.get_coalesce = be_get_coalesce,
951 	.set_coalesce = be_set_coalesce,
952 	.get_ringparam = be_get_ringparam,
953 	.get_pauseparam = be_get_pauseparam,
954 	.set_pauseparam = be_set_pauseparam,
955 	.get_strings = be_get_stat_strings,
956 	.set_phys_id = be_set_phys_id,
957 	.get_msglevel = be_get_msg_level,
958 	.set_msglevel = be_set_msg_level,
959 	.get_sset_count = be_get_sset_count,
960 	.get_ethtool_stats = be_get_ethtool_stats,
961 	.get_regs_len = be_get_reg_len,
962 	.get_regs = be_get_regs,
963 	.flash_device = be_do_flash,
964 	.self_test = be_self_test,
965 };
966