/* drivers/net/ethernet/cavium/liquidio/lio_ethtool.c */
/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
*          Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

struct oct_mdio_cmd_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct oct_mdio_cmd_resp {
	u64 rh;
	struct oct_mdio_cmd resp;
	u64 status;
};

#define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))

/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
};

#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN  4096
#define OCT_ETHTOOL_REGSVER  1
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"Instr posted",
	"Instr processed",
	"Instr dropped",
	"Bytes Sent",
	"Sgentry_sent",
	"Inst cntreg",
	"Tx done",
	"Tx Iq busy",
	"Tx dropped",
	"Tx bytes",
};

static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"OQ Pkts Received",
	"OQ Bytes Received",
	"Dropped no dispatch",
	"Dropped nomem",
	"Dropped toomany",
	"Stack RX cnt",
	"Stack RX Bytes",
	"RX dropped",
};

#define OCTNIC_NCMD_AUTONEG_ON  0x1
#define OCTNIC_NCMD_PHY_ON      0x2

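/* Report link settings (port type, supported/advertised modes, speed and
 * duplex) to ethtool.  Only XAUI and RXAUI interfaces are recognized here;
 * userspace reads this via e.g. "ethtool eth0".
 */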
static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;

	linfo = &lio->linfo;

	if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
	    linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
		ecmd->port = PORT_FIBRE;
		ecmd->supported =
			(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
			 SUPPORTED_Pause);
		ecmd->advertising =
			(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
		ecmd->transceiver = XCVR_EXTERNAL;
		ecmd->autoneg = AUTONEG_DISABLE;

	} else {
		dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
	}

	if (linfo->link.s.status) {
		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
		ecmd->duplex = linfo->link.s.duplex;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct lio *lio;
	struct octeon_device *oct;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strlcpy(drvinfo->driver, "liquidio", sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, LIQUIDIO_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(oct->pci_dev),
		sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
}

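/* Report the maximum and currently configured number of RX and TX queues
 * for this interface; queried from userspace with e.g. "ethtool -l eth0".
 * Only CN6XXX devices are handled, so other chips report zeros.
 */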
static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
}

static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[128];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		      board_info->name, board_info->serial_number,
		      board_info->major, board_info->minor);

	return len;
}

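/* The "EEPROM" exposed to ethtool is not a real EEPROM; it is a text
 * rendering of the board information kept in the octeon_device structure.
 * Userspace can read it with e.g. "ethtool -e eth0".
 */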
static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;

	if (eeprom->offset != 0)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	sprintf((char *)bytes,
		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
		board_info->name, board_info->serial_number,
		board_info->major, board_info->minor);

	return 0;
}

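/* Send a control command to the NIC firmware to drive a PHY GPIO pin.
 * This is used below to blink the LED on CN66XX boards for "ethtool -p".
 */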
static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct octnic_ctrl_params nparams;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
	nctrl.ncmd.s.param2 = addr;
	nctrl.ncmd.s.param3 = val;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	nparams.resp_order = OCTEON_RESP_ORDERED;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
		return -EINVAL;
	}

	return 0;
}

/* Callback invoked when an MDIO command response arrives */
static void octnet_mdio_resp_callback(struct octeon_device *oct,
				      u32 status,
				      void *buf)
{
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;

	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
	} else {
		ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
	}
	wake_up_interruptible(&mdio_cmd_ctx->wc);
}

/* This routine provides PHY access for MDIO clause 45 operations.
 * op == 0 performs a read; any other value writes *value to the
 * register at the given location.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	mdio_cmd->value2 = lio->linfo.ifidx;
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}

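/* Identify the physical port by blinking its LED; driven from userspace
 * with e.g. "ethtool -p eth0 5".  CN66XX boards toggle a PHY GPIO, while
 * CN68XX boards use the PHY's LED beacon registers over MDIO clause 45.
 */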
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_BEACON_ADDR,
						     &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_CTRL_ADDR,
						     &value);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);

		} else if (oct->chip_id == OCTEON_CN68XX) {
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

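/* Report descriptor ring sizes ("ethtool -g eth0").  When the MTU is above
 * the default frame size the RX ring is reported in the jumbo fields
 * instead of the normal ones.
 */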
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	}

	if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
		ering->rx_pending = 0;
		ering->rx_max_pending = 0;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = rx_pending;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = rx_max_pending;
	} else {
		ering->rx_pending = rx_pending;
		ering->rx_max_pending = rx_max_pending;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = 0;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = 0;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
}

static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

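/* Set the driver message level ("ethtool -s eth0 msglvl ...").  Toggling
 * the NETIF_MSG_HW bit additionally enables or disables verbose logging
 * in the NIC firmware via a control command.
 */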
static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE);
	}

	lio->msg_enable = msglvl;
}

static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Note: these drivers do not support pause autonegotiation.
	 * Just report pause frame support.
	 */
	pause->tx_pause = 1;
	pause->rx_pause = 1;	/* TODO: add RX pause frame support. */
}

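/* Fill in the per-queue statistics ("ethtool -S eth0").  The values are
 * written in the same order as the strings emitted by lio_get_strings():
 * one block of IQ stats per active instruction queue followed by one block
 * of OQ stats per active output queue.
 */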
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j;

	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
		if (!(oct_dev->io_qmask.iq & (1UL << j)))
			continue;
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		data[i++] =
			CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_processed);
		data[i++] =
			CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
		data[i++] =
			readl(oct_dev->instr_queue[j]->inst_cnt_reg);
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
	}

	/* for (j = 0; j < oct_dev->num_oqs; j++){ */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
		if (!(oct_dev->io_qmask.oq & (1UL << j)))
			continue;
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
	}
}

static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;

	num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
		if (!(oct_dev->io_qmask.iq & (1UL << i)))
			continue;
		for (j = 0; j < num_iq_stats; j++) {
			sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}
	}

	num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
	/* for (i = 0; i < oct_dev->num_oqs; i++) { */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
		if (!(oct_dev->io_qmask.oq & (1UL << i)))
			continue;
		for (j = 0; j < num_oq_stats; j++) {
			sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}
	}
}

static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
	       (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
}

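/* Report the current interrupt coalescing settings ("ethtool -c eth0").
 * When adaptive RX moderation is enabled the adaptive thresholds are
 * reported instead of the static packet-count/time values.
 */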
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	/* case OCTEON_CN73XX: Todo */
	/*      break; */
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		if (!intrmod_cfg->intrmod_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		} else {
			intr_coal->use_adaptive_rx_coalesce =
				intrmod_cfg->intrmod_enable;
			intr_coal->rate_sample_interval =
				intrmod_cfg->intrmod_check_intrvl;
			intr_coal->pkt_rate_high =
				intrmod_cfg->intrmod_maxpkt_ratethr;
			intr_coal->pkt_rate_low =
				intrmod_cfg->intrmod_minpkt_ratethr;
			intr_coal->rx_max_coalesced_frames_high =
				intrmod_cfg->intrmod_maxcnt_trigger;
			intr_coal->rx_coalesce_usecs_high =
				intrmod_cfg->intrmod_maxtmr_trigger;
			intr_coal->rx_coalesce_usecs_low =
				intrmod_cfg->intrmod_mintmr_trigger;
			intr_coal->rx_max_coalesced_frames_low =
				intrmod_cfg->intrmod_mincnt_trigger;
		}

		iq = oct->instr_queue[lio->linfo.txpciq[0]];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;

	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}

	return 0;
}

/* Callback function for intrmod */
static void octnet_intrmod_callback(struct octeon_device *oct_dev,
				    u32 status,
				    void *ptr)
{
	struct oct_intrmod_cmd *cmd = ptr;
	struct octeon_soft_command *sc = cmd->sc;

	oct_dev = cmd->oct_dev;

	if (status)
		dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
			CVM_CAST64(status));
	else
		dev_info(&oct_dev->pci_dev->dev,
			 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
			 oct_dev->intrmod.intrmod_enable);

	octeon_free_soft_command(oct_dev, sc);
}

/* Configure interrupt moderation parameters */
static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cmd *cmd;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = (struct octeon_device *)oct;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_cmd));

	if (!sc)
		return -ENOMEM;

	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
	cmd->sc = sc;
	cmd->cfg = cfg;
	cmd->oct_dev = oct_dev;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = cmd;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	return 0;
}

/* Enable/disable adaptive interrupt moderation */
static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
				 *intr_coal, int adaptive)
{
	int ret = 0;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	if (adaptive) {
		if (intr_coal->rate_sample_interval)
			intrmod_cfg->intrmod_check_intrvl =
				intr_coal->rate_sample_interval;
		else
			intrmod_cfg->intrmod_check_intrvl =
				LIO_INTRMOD_CHECK_INTERVAL;

		if (intr_coal->pkt_rate_high)
			intrmod_cfg->intrmod_maxpkt_ratethr =
				intr_coal->pkt_rate_high;
		else
			intrmod_cfg->intrmod_maxpkt_ratethr =
				LIO_INTRMOD_MAXPKT_RATETHR;

		if (intr_coal->pkt_rate_low)
			intrmod_cfg->intrmod_minpkt_ratethr =
				intr_coal->pkt_rate_low;
		else
			intrmod_cfg->intrmod_minpkt_ratethr =
				LIO_INTRMOD_MINPKT_RATETHR;

		if (intr_coal->rx_max_coalesced_frames_high)
			intrmod_cfg->intrmod_maxcnt_trigger =
				intr_coal->rx_max_coalesced_frames_high;
		else
			intrmod_cfg->intrmod_maxcnt_trigger =
				LIO_INTRMOD_MAXCNT_TRIGGER;

		if (intr_coal->rx_coalesce_usecs_high)
			intrmod_cfg->intrmod_maxtmr_trigger =
				intr_coal->rx_coalesce_usecs_high;
		else
			intrmod_cfg->intrmod_maxtmr_trigger =
				LIO_INTRMOD_MAXTMR_TRIGGER;

		if (intr_coal->rx_coalesce_usecs_low)
			intrmod_cfg->intrmod_mintmr_trigger =
				intr_coal->rx_coalesce_usecs_low;
		else
			intrmod_cfg->intrmod_mintmr_trigger =
				LIO_INTRMOD_MINTMR_TRIGGER;

		if (intr_coal->rx_max_coalesced_frames_low)
			intrmod_cfg->intrmod_mincnt_trigger =
				intr_coal->rx_max_coalesced_frames_low;
		else
			intrmod_cfg->intrmod_mincnt_trigger =
				LIO_INTRMOD_MINCNT_TRIGGER;
	}

	intrmod_cfg->intrmod_enable = adaptive;
	ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);

	return ret;
}

static int
oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
{
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u32 rx_max_coalesced_frames;

	if (!intr_coal->rx_max_coalesced_frames)
		rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
	else
		rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;

	/* Disable adaptive interrupt moderation */
	ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
	if (ret)
		return ret;

	/* Config Cnt based interrupt values */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 rx_max_coalesced_frames);
	CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
	return 0;
}

static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
			       *intr_coal)
{
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u32 time_threshold, rx_coalesce_usecs;

	if (!intr_coal->rx_coalesce_usecs)
		rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
	else
		rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

	/* Disable adaptive interrupt moderation */
	ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
	if (ret)
		return ret;

	/* Config Time based interrupt values */
	time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
	CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);

	return 0;
}

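/* Apply interrupt coalescing settings from userspace, e.g.
 *   ethtool -C eth0 adaptive-rx on
 *   ethtool -C eth0 rx-usecs 64 rx-frames 32 tx-frames 64
 * tx-frames maps to the instruction-queue fill threshold; the RX settings
 * either enable adaptive moderation or program static count/time triggers.
 */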
static int lio_set_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	u32 j, q_no;

	if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
	    (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
		for (j = 0; j < lio->linfo.num_txpciq; j++) {
			q_no = lio->linfo.txpciq[j];
			oct->instr_queue[q_no]->fill_threshold =
				intr_coal->tx_max_coalesced_frames;
		}
	} else {
		dev_err(&oct->pci_dev->dev,
			"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
			intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
			CN6XXX_DB_MAX);
		return -EINVAL;
	}

	/* User requested adaptive-rx on */
	if (intr_coal->use_adaptive_rx_coalesce) {
		ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
		if (ret)
			goto ret_intrmod;
	}

	/* User requested adaptive-rx off and a rx coalesce time */
	if ((intr_coal->rx_coalesce_usecs) &&
	    (!intr_coal->use_adaptive_rx_coalesce)) {
		ret = oct_cfg_rx_intrtime(lio, intr_coal);
		if (ret)
			goto ret_intrmod;
	}

	/* User requested adaptive-rx off and a rx frame count */
	if ((intr_coal->rx_max_coalesced_frames) &&
	    (!intr_coal->use_adaptive_rx_coalesce)) {
		ret = oct_cfg_rx_intrcnt(lio, intr_coal);
		if (ret)
			goto ret_intrmod;
	}

	/* User requested adaptive-rx off, so use default coalesce params */
	if ((!intr_coal->rx_max_coalesced_frames) &&
	    (!intr_coal->use_adaptive_rx_coalesce) &&
	    (!intr_coal->rx_coalesce_usecs)) {
		dev_info(&oct->pci_dev->dev,
			 "Turning off adaptive-rx interrupt moderation\n");
		dev_info(&oct->pci_dev->dev,
			 "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
			 CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
		ret = oct_cfg_rx_intrtime(lio, intr_coal);
		if (ret)
			goto ret_intrmod;

		ret = oct_cfg_rx_intrcnt(lio, intr_coal);
		if (ret)
			goto ret_intrmod;
	}

	return 0;
ret_intrmod:
	return ret;
}

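/* Report timestamping capabilities ("ethtool -T eth0"): hardware and
 * software TX/RX timestamping, the PTP hardware clock index (if a PHC was
 * registered) and the supported RX filter set.
 */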
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;

	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}

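/* Change link settings from userspace, e.g.
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 * XAUI and RXAUI interfaces run at a fixed speed/duplex and reject any
 * change; for other interfaces the request is forwarded to the SE core
 * application on the adapter as a control command.
 */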
static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	struct octnic_ctrl_pkt nctrl;
	struct octnic_ctrl_params nparams;
	int ret = 0;

	/* get the link info */
	linfo = &lio->linfo;

	if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
						  ecmd->speed != SPEED_10) ||
						 (ecmd->duplex != DUPLEX_HALF &&
						  ecmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Ethtool support is not provided for XAUI and RXAUI interfaces
	 * as they operate at fixed speed and duplex settings.
	 */
	if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
	    linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
		dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
		return -EINVAL;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
	nctrl.wait_time = 1000;
	nctrl.netpndev = (u64)netdev;
	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* Pass the parameters sent by ethtool (speed, autoneg, duplex) to the
	 * SE core application using ncmd.s.more and the ncmd.s.param fields.
	 */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg ON */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
				     OCTNIC_NCMD_AUTONEG_ON;
		nctrl.ncmd.s.param2 = ecmd->advertising;
	} else {
		/* Autoneg OFF */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;

		nctrl.ncmd.s.param3 = ecmd->duplex;

		nctrl.ncmd.s.param2 = ecmd->speed;
	}

	nparams.resp_order = OCTEON_RESP_ORDERED;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
		return -EINVAL;
	}

	return 0;
}

static int lio_nway_reset(struct net_device *netdev)
{
	if (netif_running(netdev)) {
		struct ethtool_cmd ecmd;

		memset(&ecmd, 0, sizeof(struct ethtool_cmd));
		ecmd.autoneg = 0;
		ecmd.speed = 0;
		ecmd.duplex = 0;
		lio_set_settings(netdev, &ecmd);
	}
	return 0;
}

/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
{
	return OCT_ETHTOOL_REGDUMP_LEN;
}

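/* Dump the CN6XXX CSRs of interest (PCI window, interrupt, output/input
 * queue, DMA and BAR1 index registers) as text into the buffer supplied
 * by lio_get_regs(); returns the number of bytes written.
 */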
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	int i, len = 0;

	/* PCI  Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
	reg = CN6XXX_WIN_WR_ADDR_LO;
	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_LO;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_LO;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
		       CN6XXX_WIN_WR_MASK_REG,
		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));

	/* PCI  Interrupt Register */
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
						CN6XXX_SLI_INT_ENB64_PORT0));
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT1,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));

	/* PCI  Output queue registers */
	for (i = 0; i < oct->num_oqs; i++) {
		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}
	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
		       reg, octeon_read_csr(oct, reg));
	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
		       reg, octeon_read_csr(oct, reg));

	/* PCI  Input queue registers */
	for (i = 0; i <= 3; i++) {
		reg = CN6XXX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}

	/* PCI  DMA registers */

	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
		       CN6XXX_DMA_CNT(0),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));

	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
		       CN6XXX_DMA_CNT(1),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));

	/* PCI  Index registers */

	len += sprintf(s + len, "\n");

	for (i = 0; i < 16; i++) {
		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
	}

	return len;
}

static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
{
	u32 val;
	int i, len = 0;

	/* PCI CONFIG Registers */

	len += sprintf(s + len,
		       "\n\t Octeon Config space Registers\n\n");

	for (i = 0; i <= 13; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	for (i = 30; i <= 34; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	return len;
}

/* Return the register dump to the user app. */
static void lio_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *regbuf)
{
	struct lio *lio = GET_LIO(dev);
	int len = 0;
	struct octeon_device *oct = lio->oct_dev;

	memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
	regs->version = OCT_ETHTOOL_REGSVER;

	switch (oct->chip_id) {
	/* case OCTEON_CN73XX: Todo */
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		len += cn6xxx_read_csr_reg(regbuf + len, oct);
		len += cn6xxx_read_config_reg(regbuf + len, oct);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
			__func__, oct->chip_id);
	}
}

static const struct ethtool_ops lio_ethtool_ops = {
	.get_settings		= lio_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_phys_id		= lio_set_phys_id,
	.get_eeprom_len		= lio_get_eeprom_len,
	.get_eeprom		= lio_get_eeprom,
	.get_strings		= lio_get_strings,
	.get_ethtool_stats	= lio_get_ethtool_stats,
	.get_pauseparam		= lio_get_pauseparam,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_set_msglevel,
	.get_sset_count		= lio_get_sset_count,
	.nway_reset		= lio_nway_reset,
	.set_settings		= lio_set_settings,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_ts_info		= lio_get_ts_info,
};

void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &lio_ethtool_ops;
}