xref: /linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c (revision 48dea9a700c8728cc31a1dd44588b97578de86ee)
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/netdevice.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/pci.h>
21 #include "liquidio_common.h"
22 #include "octeon_droq.h"
23 #include "octeon_iq.h"
24 #include "response_manager.h"
25 #include "octeon_device.h"
26 #include "octeon_nic.h"
27 #include "octeon_main.h"
28 #include "octeon_network.h"
29 #include "cn66xx_regs.h"
30 #include "cn66xx_device.h"
31 #include "cn23xx_pf_device.h"
32 #include "cn23xx_vf_device.h"
33 
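/* Forward declaration: lio_reset_queues() is shared by the set_ringparam
 * and set_channels paths below.
 */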
34 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
35 
36 struct oct_intrmod_resp {
37 	u64     rh;
38 	struct oct_intrmod_cfg intrmod;
39 	u64     status;
40 };
41 
42 struct oct_mdio_cmd_resp {
43 	u64 rh;
44 	struct oct_mdio_cmd resp;
45 	u64 status;
46 };
47 
48 #define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
49 
50 /* Octeon's interface mode of operation */
51 enum {
52 	INTERFACE_MODE_DISABLED,
53 	INTERFACE_MODE_RGMII,
54 	INTERFACE_MODE_GMII,
55 	INTERFACE_MODE_SPI,
56 	INTERFACE_MODE_PCIE,
57 	INTERFACE_MODE_XAUI,
58 	INTERFACE_MODE_SGMII,
59 	INTERFACE_MODE_PICMG,
60 	INTERFACE_MODE_NPI,
61 	INTERFACE_MODE_LOOP,
62 	INTERFACE_MODE_SRIO,
63 	INTERFACE_MODE_ILK,
64 	INTERFACE_MODE_RXAUI,
65 	INTERFACE_MODE_QSGMII,
66 	INTERFACE_MODE_AGL,
67 	INTERFACE_MODE_XLAUI,
68 	INTERFACE_MODE_XFI,
69 	INTERFACE_MODE_10G_KR,
70 	INTERFACE_MODE_40G_KR4,
71 	INTERFACE_MODE_MIXED,
72 };
73 
74 #define OCT_ETHTOOL_REGDUMP_LEN  4096
75 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
76 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
77 #define OCT_ETHTOOL_REGSVER  1
78 
79 /* PF statistics; order must match the values written in lio_get_ethtool_stats() */
80 static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
81 	"rx_packets",
82 	"tx_packets",
83 	"rx_bytes",
84 	"tx_bytes",
85 	"rx_errors",
86 	"tx_errors",
87 	"rx_dropped",
88 	"tx_dropped",
89 
90 	"tx_total_sent",
91 	"tx_total_fwd",
92 	"tx_err_pko",
93 	"tx_err_pki",
94 	"tx_err_link",
95 	"tx_err_drop",
96 
97 	"tx_tso",
98 	"tx_tso_packets",
99 	"tx_tso_err",
100 	"tx_vxlan",
101 
102 	"tx_mcast",
103 	"tx_bcast",
104 
105 	"mac_tx_total_pkts",
106 	"mac_tx_total_bytes",
107 	"mac_tx_mcast_pkts",
108 	"mac_tx_bcast_pkts",
109 	"mac_tx_ctl_packets",
110 	"mac_tx_total_collisions",
111 	"mac_tx_one_collision",
112 	"mac_tx_multi_collision",
113 	"mac_tx_max_collision_fail",
114 	"mac_tx_max_deferral_fail",
115 	"mac_tx_fifo_err",
116 	"mac_tx_runts",
117 
118 	"rx_total_rcvd",
119 	"rx_total_fwd",
120 	"rx_mcast",
121 	"rx_bcast",
122 	"rx_jabber_err",
123 	"rx_l2_err",
124 	"rx_frame_err",
125 	"rx_err_pko",
126 	"rx_err_link",
127 	"rx_err_drop",
128 
129 	"rx_vxlan",
130 	"rx_vxlan_err",
131 
132 	"rx_lro_pkts",
133 	"rx_lro_bytes",
134 	"rx_total_lro",
135 
136 	"rx_lro_aborts",
137 	"rx_lro_aborts_port",
138 	"rx_lro_aborts_seq",
139 	"rx_lro_aborts_tsval",
140 	"rx_lro_aborts_timer",
141 	"rx_fwd_rate",
142 
143 	"mac_rx_total_rcvd",
144 	"mac_rx_bytes",
145 	"mac_rx_total_bcst",
146 	"mac_rx_total_mcst",
147 	"mac_rx_runts",
148 	"mac_rx_ctl_packets",
149 	"mac_rx_fifo_err",
150 	"mac_rx_dma_drop",
151 	"mac_rx_fcs_err",
152 
153 	"link_state_changes",
154 };
155 
156 /* VF statistics; order must match the values written in lio_vf_get_ethtool_stats() */
157 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
158 	"rx_packets",
159 	"tx_packets",
160 	"rx_bytes",
161 	"tx_bytes",
162 	"rx_errors",
163 	"tx_errors",
164 	"rx_dropped",
165 	"tx_dropped",
166 	"rx_mcast",
167 	"tx_mcast",
168 	"rx_bcast",
169 	"tx_bcast",
170 	"link_state_changes",
171 };
172 
173 /* statistics of each host tx queue (instruction queue, IQ) */
174 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
175 	"packets",
176 	"bytes",
177 	"dropped",
178 	"iq_busy",
179 	"sgentry_sent",
180 
181 	"fw_instr_posted",
182 	"fw_instr_processed",
183 	"fw_instr_dropped",
184 	"fw_bytes_sent",
185 
186 	"tso",
187 	"vxlan",
188 	"txq_restart",
189 };
190 
191 /* statistics of each host rx queue (DROQ) */
192 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
193 	"packets",
194 	"bytes",
195 	"dropped",
196 	"dropped_nomem",
197 	"dropped_toomany",
198 	"fw_dropped",
199 	"fw_pkts_received",
200 	"fw_bytes_received",
201 	"fw_dropped_nodispatch",
202 
203 	"vxlan",
204 	"buffer_alloc_failure",
205 };
206 
207 /* LiquidIO driver private flags */
208 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
209 };
210 
211 #define OCTNIC_NCMD_AUTONEG_ON  0x1
212 #define OCTNIC_NCMD_PHY_ON      0x2
213 
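/* ethtool_ops::get_link_ksettings handler (queried by e.g. `ethtool eth0`).
 * The supported/advertising masks are cleared first, then filled in per PHY
 * port type; the 25G CN2350/CN2360 subsystems additionally report 10G/25G
 * and FEC modes depending on the current speed setting.
 */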
214 static int lio_get_link_ksettings(struct net_device *netdev,
215 				  struct ethtool_link_ksettings *ecmd)
216 {
217 	struct lio *lio = GET_LIO(netdev);
218 	struct octeon_device *oct = lio->oct_dev;
219 	struct oct_link_info *linfo;
220 
221 	linfo = &lio->linfo;
222 
223 	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
224 	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
225 
226 	switch (linfo->link.s.phy_type) {
227 	case LIO_PHY_PORT_TP:
228 		ecmd->base.port = PORT_TP;
229 		ecmd->base.autoneg = AUTONEG_DISABLE;
230 		ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
231 		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
232 		ethtool_link_ksettings_add_link_mode(ecmd, supported,
233 						     10000baseT_Full);
234 
235 		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
236 		ethtool_link_ksettings_add_link_mode(ecmd, advertising,
237 						     10000baseT_Full);
238 
239 		break;
240 
241 	case LIO_PHY_PORT_FIBRE:
242 		if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
243 		    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
244 		    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
245 		    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
246 			dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
247 			ecmd->base.transceiver = XCVR_EXTERNAL;
248 		} else {
249 			dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
250 				linfo->link.s.if_mode);
251 		}
252 
253 		ecmd->base.port = PORT_FIBRE;
254 		ecmd->base.autoneg = AUTONEG_DISABLE;
255 		ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
256 
257 		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
258 		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
259 		if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
260 		    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
261 			if (OCTEON_CN23XX_PF(oct)) {
262 				ethtool_link_ksettings_add_link_mode
263 					(ecmd, supported, 25000baseSR_Full);
264 				ethtool_link_ksettings_add_link_mode
265 					(ecmd, supported, 25000baseKR_Full);
266 				ethtool_link_ksettings_add_link_mode
267 					(ecmd, supported, 25000baseCR_Full);
268 
269 				if (oct->no_speed_setting == 0) {
270 					ethtool_link_ksettings_add_link_mode
271 						(ecmd, supported,
272 						 10000baseSR_Full);
273 					ethtool_link_ksettings_add_link_mode
274 						(ecmd, supported,
275 						 10000baseKR_Full);
276 					ethtool_link_ksettings_add_link_mode
277 						(ecmd, supported,
278 						 10000baseCR_Full);
279 				}
280 
281 				if (oct->no_speed_setting == 0) {
282 					liquidio_get_speed(lio);
283 					liquidio_get_fec(lio);
284 				} else {
285 					oct->speed_setting = 25;
286 				}
287 
288 				if (oct->speed_setting == 10) {
289 					ethtool_link_ksettings_add_link_mode
290 						(ecmd, advertising,
291 						 10000baseSR_Full);
292 					ethtool_link_ksettings_add_link_mode
293 						(ecmd, advertising,
294 						 10000baseKR_Full);
295 					ethtool_link_ksettings_add_link_mode
296 						(ecmd, advertising,
297 						 10000baseCR_Full);
298 				}
299 				if (oct->speed_setting == 25) {
300 					ethtool_link_ksettings_add_link_mode
301 						(ecmd, advertising,
302 						 25000baseSR_Full);
303 					ethtool_link_ksettings_add_link_mode
304 						(ecmd, advertising,
305 						 25000baseKR_Full);
306 					ethtool_link_ksettings_add_link_mode
307 						(ecmd, advertising,
308 						 25000baseCR_Full);
309 				}
310 
311 				if (oct->no_speed_setting)
312 					break;
313 
314 				ethtool_link_ksettings_add_link_mode
315 					(ecmd, supported, FEC_RS);
316 				ethtool_link_ksettings_add_link_mode
317 					(ecmd, supported, FEC_NONE);
318 				/* FEC_NONE is reported as ETHTOOL_FEC_OFF */
319 				if (oct->props[lio->ifidx].fec == 1) {
320 					/* ETHTOOL_FEC_RS */
321 					ethtool_link_ksettings_add_link_mode
322 						(ecmd, advertising, FEC_RS);
323 				} else {
324 					/* ETHTOOL_FEC_OFF */
325 					ethtool_link_ksettings_add_link_mode
326 						(ecmd, advertising, FEC_NONE);
327 				}
328 			} else { /* VF */
329 				if (linfo->link.s.speed == 10000) {
330 					ethtool_link_ksettings_add_link_mode
331 						(ecmd, supported,
332 						 10000baseSR_Full);
333 					ethtool_link_ksettings_add_link_mode
334 						(ecmd, supported,
335 						 10000baseKR_Full);
336 					ethtool_link_ksettings_add_link_mode
337 						(ecmd, supported,
338 						 10000baseCR_Full);
339 
340 					ethtool_link_ksettings_add_link_mode
341 						(ecmd, advertising,
342 						 10000baseSR_Full);
343 					ethtool_link_ksettings_add_link_mode
344 						(ecmd, advertising,
345 						 10000baseKR_Full);
346 					ethtool_link_ksettings_add_link_mode
347 						(ecmd, advertising,
348 						 10000baseCR_Full);
349 				}
350 
351 				if (linfo->link.s.speed == 25000) {
352 					ethtool_link_ksettings_add_link_mode
353 						(ecmd, supported,
354 						 25000baseSR_Full);
355 					ethtool_link_ksettings_add_link_mode
356 						(ecmd, supported,
357 						 25000baseKR_Full);
358 					ethtool_link_ksettings_add_link_mode
359 						(ecmd, supported,
360 						 25000baseCR_Full);
361 
362 					ethtool_link_ksettings_add_link_mode
363 						(ecmd, advertising,
364 						 25000baseSR_Full);
365 					ethtool_link_ksettings_add_link_mode
366 						(ecmd, advertising,
367 						 25000baseKR_Full);
368 					ethtool_link_ksettings_add_link_mode
369 						(ecmd, advertising,
370 						 25000baseCR_Full);
371 				}
372 			}
373 		} else {
374 			ethtool_link_ksettings_add_link_mode(ecmd, supported,
375 							     10000baseT_Full);
376 			ethtool_link_ksettings_add_link_mode(ecmd, advertising,
377 							     10000baseT_Full);
378 		}
379 		break;
380 	}
381 
382 	if (linfo->link.s.link_up) {
383 		ecmd->base.speed = linfo->link.s.speed;
384 		ecmd->base.duplex = linfo->link.s.duplex;
385 	} else {
386 		ecmd->base.speed = SPEED_UNKNOWN;
387 		ecmd->base.duplex = DUPLEX_UNKNOWN;
388 	}
389 
390 	return 0;
391 }
392 
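/* ethtool_ops::set_link_ksettings handler. Only the 25G CN2350/CN2360
 * subsystems accept a speed change, and only between 10G and 25G with
 * autoneg off; an illustrative invocation would be
 * `ethtool -s eth0 speed 25000 autoneg off`.
 */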
393 static int lio_set_link_ksettings(struct net_device *netdev,
394 				  const struct ethtool_link_ksettings *ecmd)
395 {
396 	const int speed = ecmd->base.speed;
397 	struct lio *lio = GET_LIO(netdev);
398 	struct oct_link_info *linfo;
399 	struct octeon_device *oct;
400 
401 	oct = lio->oct_dev;
402 
403 	linfo = &lio->linfo;
404 
405 	if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
406 	      oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
407 		return -EOPNOTSUPP;
408 
409 	if (oct->no_speed_setting) {
410 		dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
411 			__func__);
412 		return -EOPNOTSUPP;
413 	}
414 
415 	if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
416 	     ecmd->base.duplex != linfo->link.s.duplex) ||
417 	     ecmd->base.autoneg != AUTONEG_DISABLE ||
418 	    (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
419 	     ecmd->base.speed != SPEED_UNKNOWN))
420 		return -EOPNOTSUPP;
421 
422 	if ((oct->speed_boot == speed / 1000) &&
423 	    oct->speed_boot == oct->speed_setting)
424 		return 0;
425 
426 	liquidio_set_speed(lio, speed / 1000);
427 
428 	dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
429 		oct->speed_setting);
430 
431 	return 0;
432 }
433 
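/* ethtool_ops::get_drvinfo (PF); reported by `ethtool -i eth0` */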
434 static void
435 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
436 {
437 	struct lio *lio;
438 	struct octeon_device *oct;
439 
440 	lio = GET_LIO(netdev);
441 	oct = lio->oct_dev;
442 
443 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
444 	strcpy(drvinfo->driver, "liquidio");
445 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
446 		ETHTOOL_FWVERS_LEN);
447 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
448 }
449 
450 static void
451 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
452 {
453 	struct octeon_device *oct;
454 	struct lio *lio;
455 
456 	lio = GET_LIO(netdev);
457 	oct = lio->oct_dev;
458 
459 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
460 	strcpy(drvinfo->driver, "liquidio_vf");
461 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
462 		ETHTOOL_FWVERS_LEN);
463 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
464 }
465 
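/* Tell firmware how many queues the host now uses. Based on the fields set
 * below, the request is a NIC control packet with cmd
 * OCTNET_CMD_QUEUE_COUNT_CTL and param1 = param2 = num_queues.
 */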
466 static int
467 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
468 {
469 	struct lio *lio = GET_LIO(netdev);
470 	struct octeon_device *oct = lio->oct_dev;
471 	struct octnic_ctrl_pkt nctrl;
472 	int ret = 0;
473 
474 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
475 
476 	nctrl.ncmd.u64 = 0;
477 	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
478 	nctrl.ncmd.s.param1 = num_queues;
479 	nctrl.ncmd.s.param2 = num_queues;
480 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
481 	nctrl.netpndev = (u64)netdev;
482 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
483 
484 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
485 	if (ret) {
486 		dev_err(&oct->pci_dev->dev, "Failed to send queue count update command (ret: 0x%x)\n",
487 			ret);
488 		return -1;
489 	}
490 
491 	return 0;
492 }
493 
494 static void
495 lio_ethtool_get_channels(struct net_device *dev,
496 			 struct ethtool_channels *channel)
497 {
498 	struct lio *lio = GET_LIO(dev);
499 	struct octeon_device *oct = lio->oct_dev;
500 	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
501 	u32 combined_count = 0, max_combined = 0;
502 
503 	if (OCTEON_CN6XXX(oct)) {
504 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
505 
506 		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
507 		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
508 		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
509 		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
510 	} else if (OCTEON_CN23XX_PF(oct)) {
511 		if (oct->sriov_info.sriov_enabled) {
512 			max_combined = lio->linfo.num_txpciq;
513 		} else {
514 			struct octeon_config *conf23_pf =
515 				CHIP_CONF(oct, cn23xx_pf);
516 
517 			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
518 		}
519 		combined_count = oct->num_iqs;
520 	} else if (OCTEON_CN23XX_VF(oct)) {
521 		u64 reg_val = 0ULL;
522 		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
523 
524 		reg_val = octeon_read_csr64(oct, ctrl);
525 		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
526 		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
527 		combined_count = oct->num_iqs;
528 	}
529 
530 	channel->max_rx = max_rx;
531 	channel->max_tx = max_tx;
532 	channel->max_combined = max_combined;
533 	channel->rx_count = rx_count;
534 	channel->tx_count = tx_count;
535 	channel->combined_count = combined_count;
536 }
537 
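/* Free every MSI-X vector and re-register them for the new IOQ count. On
 * the PF the last vector is a non-IOQ vector (hence only num_msix_irqs - 1
 * IOQ vectors are freed in the loop); on the VF all vectors are IOQ
 * vectors.
 */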
538 static int
539 lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
540 {
541 	struct msix_entry *msix_entries;
542 	int num_msix_irqs = 0;
543 	int i;
544 
545 	if (!oct->msix_on)
546 		return 0;
547 
548 	/* Disable all device interrupts now; no further notifications will
549 	 * arrive from Octeon while the vectors are being reallocated.
550 	 */
551 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
552 
553 	if (oct->msix_on) {
554 		if (OCTEON_CN23XX_PF(oct))
555 			num_msix_irqs = oct->num_msix_irqs - 1;
556 		else if (OCTEON_CN23XX_VF(oct))
557 			num_msix_irqs = oct->num_msix_irqs;
558 
559 		msix_entries = (struct msix_entry *)oct->msix_entries;
560 		for (i = 0; i < num_msix_irqs; i++) {
561 			if (oct->ioq_vector[i].vector) {
562 				/* clear the affinity_cpumask */
563 				irq_set_affinity_hint(msix_entries[i].vector,
564 						      NULL);
565 				free_irq(msix_entries[i].vector,
566 					 &oct->ioq_vector[i]);
567 				oct->ioq_vector[i].vector = 0;
568 			}
569 		}
570 
571 		/* The non-IOV vector's handler argument is the oct struct itself */
572 		if (OCTEON_CN23XX_PF(oct))
573 			free_irq(msix_entries[i].vector, oct);
574 
575 		pci_disable_msix(oct->pci_dev);
576 		kfree(oct->msix_entries);
577 		oct->msix_entries = NULL;
578 	}
579 
580 	kfree(oct->irq_name_storage);
581 	oct->irq_name_storage = NULL;
582 
583 	if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
584 		dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
585 		return -1;
586 	}
587 
588 	if (octeon_setup_interrupt(oct, num_ioqs)) {
589 		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
590 		return -1;
591 	}
592 
593 	/* Enable Octeon device interrupts */
594 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
595 
596 	return 0;
597 }
598 
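/* ethtool_ops::set_channels handler, reached by e.g.
 * `ethtool -L eth0 combined 8`. Only "combined" channels are accepted;
 * the rx, tx and other counts must be zero.
 */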
599 static int
600 lio_ethtool_set_channels(struct net_device *dev,
601 			 struct ethtool_channels *channel)
602 {
603 	u32 combined_count, max_combined;
604 	struct lio *lio = GET_LIO(dev);
605 	struct octeon_device *oct = lio->oct_dev;
606 	int stopped = 0;
607 
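	/* Note: this is a plain lexicographic compare; it assumes the dotted
	 * firmware version components stay single-digit (e.g. "1.7.2").
	 */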
608 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
609 		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
610 		return -EINVAL;
611 	}
612 
613 	if (!channel->combined_count || channel->other_count ||
614 	    channel->rx_count || channel->tx_count)
615 		return -EINVAL;
616 
617 	combined_count = channel->combined_count;
618 
619 	if (OCTEON_CN23XX_PF(oct)) {
620 		if (oct->sriov_info.sriov_enabled) {
621 			max_combined = lio->linfo.num_txpciq;
622 		} else {
623 			struct octeon_config *conf23_pf =
624 				CHIP_CONF(oct,
625 					  cn23xx_pf);
626 
627 			max_combined =
628 				CFG_GET_IQ_MAX_Q(conf23_pf);
629 		}
630 	} else if (OCTEON_CN23XX_VF(oct)) {
631 		u64 reg_val = 0ULL;
632 		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
633 
634 		reg_val = octeon_read_csr64(oct, ctrl);
635 		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
636 		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
637 	} else {
638 		return -EINVAL;
639 	}
640 
641 	if (combined_count > max_combined || combined_count < 1)
642 		return -EINVAL;
643 
644 	if (combined_count == oct->num_iqs)
645 		return 0;
646 
647 	ifstate_set(lio, LIO_IFSTATE_RESETTING);
648 
649 	if (netif_running(dev)) {
650 		dev->netdev_ops->ndo_stop(dev);
651 		stopped = 1;
652 	}
653 
654 	if (lio_reset_queues(dev, combined_count))
655 		return -EINVAL;
656 
657 	if (stopped)
658 		dev->netdev_ops->ndo_open(dev);
659 
660 	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
661 
662 	return 0;
663 }
664 
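/* The "EEPROM" exposed through `ethtool -e eth0` is synthesized board info,
 * not a physical EEPROM; its length is that of the formatted string below.
 */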
665 static int lio_get_eeprom_len(struct net_device *netdev)
666 {
667 	u8 buf[192];
668 	struct lio *lio = GET_LIO(netdev);
669 	struct octeon_device *oct_dev = lio->oct_dev;
670 	struct octeon_board_info *board_info;
671 	int len;
672 
673 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
674 	len = snprintf(buf, sizeof(buf), "boardname:%s serialnum:%s maj:%lld min:%lld\n",
675 		      board_info->name, board_info->serial_number,
676 		      board_info->major, board_info->minor);
677 
678 	return len;
679 }
680 
681 static int
682 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
683 	       u8 *bytes)
684 {
685 	struct lio *lio = GET_LIO(netdev);
686 	struct octeon_device *oct_dev = lio->oct_dev;
687 	struct octeon_board_info *board_info;
688 
689 	if (eeprom->offset)
690 		return -EINVAL;
691 
692 	eeprom->magic = oct_dev->pci_dev->vendor;
693 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
694 	sprintf((char *)bytes,
695 		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
696 		board_info->name, board_info->serial_number,
697 		board_info->major, board_info->minor);
698 
699 	return 0;
700 }
701 
702 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
703 {
704 	struct lio *lio = GET_LIO(netdev);
705 	struct octeon_device *oct = lio->oct_dev;
706 	struct octnic_ctrl_pkt nctrl;
707 	int ret = 0;
708 
709 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
710 
711 	nctrl.ncmd.u64 = 0;
712 	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
713 	nctrl.ncmd.s.param1 = addr;
714 	nctrl.ncmd.s.param2 = val;
715 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
716 	nctrl.netpndev = (u64)netdev;
717 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
718 
719 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
720 	if (ret) {
721 		dev_err(&oct->pci_dev->dev,
722 			"Failed to configure gpio value, ret=%d\n", ret);
723 		return -EINVAL;
724 	}
725 
726 	return 0;
727 }
728 
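/* Toggle the port identification LED via firmware; used by the CN23XX
 * branches of the `ethtool -p` handler below.
 */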
729 static int octnet_id_active(struct net_device *netdev, int val)
730 {
731 	struct lio *lio = GET_LIO(netdev);
732 	struct octeon_device *oct = lio->oct_dev;
733 	struct octnic_ctrl_pkt nctrl;
734 	int ret = 0;
735 
736 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
737 
738 	nctrl.ncmd.u64 = 0;
739 	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
740 	nctrl.ncmd.s.param1 = val;
741 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
742 	nctrl.netpndev = (u64)netdev;
743 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
744 
745 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
746 	if (ret) {
747 		dev_err(&oct->pci_dev->dev,
748 			"Failed to configure LED ID active, ret=%d\n", ret);
749 		return -EINVAL;
750 	}
751 
752 	return 0;
753 }
754 
755 /* PHY access routine for MDIO clause 45 devices.
756  * op: 0 = read (*value receives the register), 1 = write (*value is written).
757  */
758 static int
759 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
760 {
761 	struct octeon_device *oct_dev = lio->oct_dev;
762 	struct octeon_soft_command *sc;
763 	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
764 	struct oct_mdio_cmd *mdio_cmd;
765 	int retval = 0;
766 
767 	sc = (struct octeon_soft_command *)
768 		octeon_alloc_soft_command(oct_dev,
769 					  sizeof(struct oct_mdio_cmd),
770 					  sizeof(struct oct_mdio_cmd_resp), 0);
771 
772 	if (!sc)
773 		return -ENOMEM;
774 
775 	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
776 	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
777 
778 	mdio_cmd->op = op;
779 	mdio_cmd->mdio_addr = loc;
780 	if (op)
781 		mdio_cmd->value1 = *value;
782 	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
783 
784 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
785 
786 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
787 				    0, 0, 0);
788 
789 	init_completion(&sc->complete);
790 	sc->sc_status = OCTEON_REQUEST_PENDING;
791 
792 	retval = octeon_send_soft_command(oct_dev, sc);
793 	if (retval == IQ_SEND_FAILED) {
794 		dev_err(&oct_dev->pci_dev->dev,
795 			"octnet_mdio45_access instruction failed status: %x\n",
796 			retval);
797 		octeon_free_soft_command(oct_dev, sc);
798 		return -EBUSY;
799 	} else {
800 		/* Sleep on a wait queue until the response arrives or the
801 		 * request times out.
802 		 */
803 		retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
804 		if (retval)
805 			return retval;
806 
807 		retval = mdio_cmd_rsp->status;
808 		if (retval) {
809 			dev_err(&oct_dev->pci_dev->dev,
810 				"octnet mdio45 access failed: %x\n", retval);
811 			WRITE_ONCE(sc->caller_is_done, true);
812 			return -EBUSY;
813 		}
814 
815 		octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
816 				    sizeof(struct oct_mdio_cmd) / 8);
817 
818 		if (!op)
819 			*value = mdio_cmd_rsp->resp.value1;
820 
821 		WRITE_ONCE(sc->caller_is_done, true);
822 	}
823 
824 	return retval;
825 }
826 
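/* ethtool_ops::set_phys_id handler; `ethtool -p eth0 <N>` blinks the port
 * LED. On CN68XX the LED registers are driven over MDIO clause 45 through
 * octnet_mdio45_access() above; an illustrative save/modify sequence:
 *
 *	int v;
 *
 *	read, then write back the beacon config:
 *	octnet_mdio45_access(lio, 0, LIO68XX_LED_BEACON_ADDR, &v);
 *	v = LIO68XX_LED_BEACON_CFGON;
 *	octnet_mdio45_access(lio, 1, LIO68XX_LED_BEACON_ADDR, &v);
 */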
827 static int lio_set_phys_id(struct net_device *netdev,
828 			   enum ethtool_phys_id_state state)
829 {
830 	struct lio *lio = GET_LIO(netdev);
831 	struct octeon_device *oct = lio->oct_dev;
832 	struct oct_link_info *linfo;
833 	int value, ret;
834 	u32 cur_ver;
835 
836 	linfo = &lio->linfo;
837 	cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
838 			     oct->fw_info.ver.min,
839 			     oct->fw_info.ver.rev);
840 
841 	switch (state) {
842 	case ETHTOOL_ID_ACTIVE:
843 		if (oct->chip_id == OCTEON_CN66XX) {
844 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
845 					   VITESSE_PHY_GPIO_DRIVEON);
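			/* A positive return asks the ethtool core to blink
			 * the LED itself, calling back with ETHTOOL_ID_ON /
			 * ETHTOOL_ID_OFF at the returned frequency (2 here).
			 */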
846 			return 2;
847 
848 		} else if (oct->chip_id == OCTEON_CN68XX) {
849 			/* Save the current LED settings */
850 			ret = octnet_mdio45_access(lio, 0,
851 						   LIO68XX_LED_BEACON_ADDR,
852 						   &lio->phy_beacon_val);
853 			if (ret)
854 				return ret;
855 
856 			ret = octnet_mdio45_access(lio, 0,
857 						   LIO68XX_LED_CTRL_ADDR,
858 						   &lio->led_ctrl_val);
859 			if (ret)
860 				return ret;
861 
862 			/* Configure Beacon values */
863 			value = LIO68XX_LED_BEACON_CFGON;
864 			ret = octnet_mdio45_access(lio, 1,
865 						   LIO68XX_LED_BEACON_ADDR,
866 						   &value);
867 			if (ret)
868 				return ret;
869 
870 			value = LIO68XX_LED_CTRL_CFGON;
871 			ret = octnet_mdio45_access(lio, 1,
872 						   LIO68XX_LED_CTRL_ADDR,
873 						   &value);
874 			if (ret)
875 				return ret;
876 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
877 			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
878 			if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
879 			    cur_ver > OCT_FW_VER(1, 7, 2))
880 				return 2;
881 			else
882 				return 0;
883 		} else {
884 			return -EINVAL;
885 		}
886 		break;
887 
888 	case ETHTOOL_ID_ON:
889 		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
890 		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
891 		    cur_ver > OCT_FW_VER(1, 7, 2))
892 			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
893 		else if (oct->chip_id == OCTEON_CN66XX)
894 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
895 					   VITESSE_PHY_GPIO_HIGH);
896 		else
897 			return -EINVAL;
898 
899 		break;
900 
901 	case ETHTOOL_ID_OFF:
902 		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
903 		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
904 		    cur_ver > OCT_FW_VER(1, 7, 2))
905 			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
906 		else if (oct->chip_id == OCTEON_CN66XX)
907 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
908 					   VITESSE_PHY_GPIO_LOW);
909 		else
910 			return -EINVAL;
911 
912 		break;
913 
914 	case ETHTOOL_ID_INACTIVE:
915 		if (oct->chip_id == OCTEON_CN66XX) {
916 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
917 					   VITESSE_PHY_GPIO_DRIVEOFF);
918 		} else if (oct->chip_id == OCTEON_CN68XX) {
919 			/* Restore LED settings */
920 			ret = octnet_mdio45_access(lio, 1,
921 						   LIO68XX_LED_CTRL_ADDR,
922 						   &lio->led_ctrl_val);
923 			if (ret)
924 				return ret;
925 
926 			ret = octnet_mdio45_access(lio, 1,
927 						   LIO68XX_LED_BEACON_ADDR,
928 						   &lio->phy_beacon_val);
929 			if (ret)
930 				return ret;
931 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
932 			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
933 
934 			return 0;
935 		} else {
936 			return -EINVAL;
937 		}
938 		break;
939 
940 	default:
941 		return -EINVAL;
942 	}
943 
944 	return 0;
945 }
946 
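/* ethtool_ops::get_ringparam; reported by `ethtool -g eth0` */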
947 static void
948 lio_ethtool_get_ringparam(struct net_device *netdev,
949 			  struct ethtool_ringparam *ering)
950 {
951 	struct lio *lio = GET_LIO(netdev);
952 	struct octeon_device *oct = lio->oct_dev;
953 	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
954 	    rx_pending = 0;
955 
956 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
957 		return;
958 
959 	if (OCTEON_CN6XXX(oct)) {
960 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
961 
962 		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
963 		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
964 		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
965 		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
966 	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
967 		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
968 		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
969 		rx_pending = oct->droq[0]->max_count;
970 		tx_pending = oct->instr_queue[0]->max_count;
971 	}
972 
973 	ering->tx_pending = tx_pending;
974 	ering->tx_max_pending = tx_max_pending;
975 	ering->rx_pending = rx_pending;
976 	ering->rx_max_pending = rx_max_pending;
977 	ering->rx_mini_pending = 0;
978 	ering->rx_jumbo_pending = 0;
979 	ering->rx_mini_max_pending = 0;
980 	ering->rx_jumbo_max_pending = 0;
981 }
982 
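/* PF-only (CN23XX): push the new per-PF ring count to firmware with an
 * OPCODE_NIC_QCOUNT_UPDATE soft command, then re-read the iq/oq masks and
 * pciq mappings from the response.
 */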
983 static int lio_23xx_reconfigure_queue_count(struct lio *lio)
984 {
985 	struct octeon_device *oct = lio->oct_dev;
986 	u32 resp_size, data_size;
987 	struct liquidio_if_cfg_resp *resp;
988 	struct octeon_soft_command *sc;
989 	union oct_nic_if_cfg if_cfg;
990 	struct lio_version *vdata;
991 	u32 ifidx_or_pfnum;
992 	int retval;
993 	int j;
994 
995 	resp_size = sizeof(struct liquidio_if_cfg_resp);
996 	data_size = sizeof(struct lio_version);
997 	sc = (struct octeon_soft_command *)
998 		octeon_alloc_soft_command(oct, data_size,
999 					  resp_size, 0);
1000 	if (!sc) {
1001 		dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
1002 			__func__);
1003 		return -1;
1004 	}
1005 
1006 	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1007 	vdata = (struct lio_version *)sc->virtdptr;
1008 
1009 	vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1010 	vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1011 	vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1012 
1013 	ifidx_or_pfnum = oct->pf_num;
1014 
1015 	if_cfg.u64 = 0;
1016 	if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
1017 	if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
1018 	if_cfg.s.base_queue = oct->sriov_info.pf_srn;
1019 	if_cfg.s.gmx_port_id = oct->pf_num;
1020 
1021 	sc->iq_no = 0;
1022 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1023 				    OPCODE_NIC_QCOUNT_UPDATE, 0,
1024 				    if_cfg.u64, 0);
1025 
1026 	init_completion(&sc->complete);
1027 	sc->sc_status = OCTEON_REQUEST_PENDING;
1028 
1029 	retval = octeon_send_soft_command(oct, sc);
1030 	if (retval == IQ_SEND_FAILED) {
1031 		dev_err(&oct->pci_dev->dev,
1032 			"Sending iq/oq config failed status: %x\n",
1033 			retval);
1034 		octeon_free_soft_command(oct, sc);
1035 		return -EIO;
1036 	}
1037 
1038 	retval = wait_for_sc_completion_timeout(oct, sc, 0);
1039 	if (retval)
1040 		return retval;
1041 
1042 	retval = resp->status;
1043 	if (retval) {
1044 		dev_err(&oct->pci_dev->dev,
1045 			"iq/oq config failed: %x\n", retval);
1046 		WRITE_ONCE(sc->caller_is_done, true);
1047 		return -1;
1048 	}
1049 
1050 	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
1051 			    (sizeof(struct liquidio_if_cfg_info)) >> 3);
1052 
1053 	lio->ifidx = ifidx_or_pfnum;
1054 	lio->linfo.num_rxpciq = hweight64(resp->cfg_info.oqmask);
1055 	lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
1056 	for (j = 0; j < lio->linfo.num_rxpciq; j++) {
1057 		lio->linfo.rxpciq[j].u64 =
1058 			resp->cfg_info.linfo.rxpciq[j].u64;
1059 	}
1060 
1061 	for (j = 0; j < lio->linfo.num_txpciq; j++) {
1062 		lio->linfo.txpciq[j].u64 =
1063 			resp->cfg_info.linfo.txpciq[j].u64;
1064 	}
1065 
1066 	lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1067 	lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1068 	lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
1069 	lio->txq = lio->linfo.txpciq[0].s.q_no;
1070 	lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1071 
1072 	dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
1073 		 lio->linfo.num_rxpciq);
1074 
1075 	WRITE_ONCE(sc->caller_is_done, true);
1076 
1077 	return 0;
1078 }
1079 
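/* Tear down and rebuild all IO queues. num_qs != oct->num_iqs means a
 * channel-count change (ethtool -L); num_qs == oct->num_iqs means only the
 * descriptor counts changed (ethtool -G). queue_count_update below records
 * which case we are in.
 */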
1080 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
1081 {
1082 	struct lio *lio = GET_LIO(netdev);
1083 	struct octeon_device *oct = lio->oct_dev;
1084 	int i, queue_count_update = 0;
1085 	struct napi_struct *napi, *n;
1086 	int ret;
1087 
1088 	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
1089 
1090 	if (wait_for_pending_requests(oct))
1091 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1092 
1093 	if (lio_wait_for_instr_fetch(oct))
1094 		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1095 
1096 	if (octeon_set_io_queues_off(oct)) {
1097 		dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
1098 		return -1;
1099 	}
1100 
1101 	/* Disable the input and output queues now. No more packets will
1102 	 * arrive from Octeon.
1103 	 */
1104 	oct->fn_list.disable_io_queues(oct);
1105 	/* Delete NAPI */
1106 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1107 		netif_napi_del(napi);
1108 
1109 	if (num_qs != oct->num_iqs) {
1110 		ret = netif_set_real_num_rx_queues(netdev, num_qs);
1111 		if (ret) {
1112 			dev_err(&oct->pci_dev->dev,
1113 				"Setting real number rx failed\n");
1114 			return ret;
1115 		}
1116 
1117 		ret = netif_set_real_num_tx_queues(netdev, num_qs);
1118 		if (ret) {
1119 			dev_err(&oct->pci_dev->dev,
1120 				"Setting real number tx failed\n");
1121 			return ret;
1122 		}
1123 
1124 		/* The value of queue_count_update decides whether it is the
1125 		 * queue count or the descriptor count that is being
1126 		 * re-configured.
1127 		 */
1128 		queue_count_update = 1;
1129 	}
1130 
1131 	/* Queue re-configuration happens in two scenarios: SRIOV enabled
1132 	 * and SRIOV disabled. A few steps, such as recreating queue zero and
1133 	 * resetting glists and IRQs, are required for both. For the latter,
1134 	 * sriov_info of the octeon device must additionally be updated.
1135 	 */
1136 	if (queue_count_update) {
1137 		cleanup_rx_oom_poll_fn(netdev);
1138 
1139 		lio_delete_glists(lio);
1140 
1141 		/* Delete the mailbox of a PF with SRIOV disabled, because
1142 		 * sriov_info is about to change.
1143 		 */
1144 		if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
1145 			oct->fn_list.free_mbox(oct);
1146 	}
1147 
1148 	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1149 		if (!(oct->io_qmask.oq & BIT_ULL(i)))
1150 			continue;
1151 		octeon_delete_droq(oct, i);
1152 	}
1153 
1154 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1155 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
1156 			continue;
1157 		octeon_delete_instr_queue(oct, i);
1158 	}
1159 
1160 	if (queue_count_update) {
1161 		/* For a PF, re-configure the SRIOV-related information */
1162 		if ((OCTEON_CN23XX_PF(oct)) &&
1163 		    !oct->sriov_info.sriov_enabled) {
1164 			oct->sriov_info.num_pf_rings = num_qs;
1165 			if (cn23xx_sriov_config(oct)) {
1166 				dev_err(&oct->pci_dev->dev,
1167 					"Queue reset aborted: SRIOV config failed\n");
1168 				return -1;
1169 			}
1170 
1171 			num_qs = oct->sriov_info.num_pf_rings;
1172 		}
1173 	}
1174 
1175 	if (oct->fn_list.setup_device_regs(oct)) {
1176 		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
1177 		return -1;
1178 	}
1179 
1180 	/* The following are needed in case of queue count re-configuration and
1181 	 * not for descriptor count re-configuration.
1182 	 */
1183 	if (queue_count_update) {
1184 		if (octeon_setup_instr_queues(oct))
1185 			return -1;
1186 
1187 		if (octeon_setup_output_queues(oct))
1188 			return -1;
1189 
1190 		/* Recreate the mailbox for a PF with SRIOV disabled */
1191 		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1192 			if (oct->fn_list.setup_mbox(oct)) {
1193 				dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
1194 				return -1;
1195 			}
1196 		}
1197 
1198 		/* Deleting and recreating IRQs whether the interface is SRIOV
1199 		 * enabled or disabled.
1200 		 */
1201 		if (lio_irq_reallocate_irqs(oct, num_qs)) {
1202 			dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
1203 			return -1;
1204 		}
1205 
1206 		/* Enable the input and output queues for this Octeon device */
1207 		if (oct->fn_list.enable_io_queues(oct)) {
1208 			dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
1209 			return -1;
1210 		}
1211 
1212 		for (i = 0; i < oct->num_oqs; i++)
1213 			writel(oct->droq[i]->max_count,
1214 			       oct->droq[i]->pkts_credit_reg);
1215 
1216 		/* Inform firmware of the new queue count. This is required for
1217 		 * the firmware to allocate more queues than were configured at
1218 		 * load time.
1219 		 */
1220 		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1221 			if (lio_23xx_reconfigure_queue_count(lio))
1222 				return -1;
1223 		}
1224 	}
1225 
1226 	/* Once firmware is aware of the new value, queues can be recreated */
1227 	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
1228 		dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
1229 		return -1;
1230 	}
1231 
1232 	if (queue_count_update) {
1233 		if (lio_setup_glists(oct, lio, num_qs)) {
1234 			dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
1235 			return -1;
1236 		}
1237 
1238 		if (setup_rx_oom_poll_fn(netdev)) {
1239 			dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
1240 			return -1;
1241 		}
1242 
1243 		/* Send firmware the new number of queues if the interface is
1244 		 * a VF or a PF that has SRIOV enabled.
1245 		 */
1246 		if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
1247 			if (lio_send_queue_count_update(netdev, num_qs))
1248 				return -1;
1249 	}
1250 
1251 	return 0;
1252 }
1253 
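/* ethtool_ops::set_ringparam handler, e.g. `ethtool -G eth0 rx 2048 tx 1024`.
 * Requested counts are clamped to the CN23XX descriptor limits, then the
 * queues are reset with the queue count unchanged.
 */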
1254 static int lio_ethtool_set_ringparam(struct net_device *netdev,
1255 				     struct ethtool_ringparam *ering)
1256 {
1257 	u32 rx_count, tx_count, rx_count_old, tx_count_old;
1258 	struct lio *lio = GET_LIO(netdev);
1259 	struct octeon_device *oct = lio->oct_dev;
1260 	int stopped = 0;
1261 
1262 	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
1263 		return -EINVAL;
1264 
1265 	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
1266 		return -EINVAL;
1267 
1268 	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
1269 			   CN23XX_MAX_OQ_DESCRIPTORS);
1270 	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
1271 			   CN23XX_MAX_IQ_DESCRIPTORS);
1272 
1273 	rx_count_old = oct->droq[0]->max_count;
1274 	tx_count_old = oct->instr_queue[0]->max_count;
1275 
1276 	if (rx_count == rx_count_old && tx_count == tx_count_old)
1277 		return 0;
1278 
1279 	ifstate_set(lio, LIO_IFSTATE_RESETTING);
1280 
1281 	if (netif_running(netdev)) {
1282 		netdev->netdev_ops->ndo_stop(netdev);
1283 		stopped = 1;
1284 	}
1285 
1286 	/* Change RX/TX DESCS  count */
1287 	if (tx_count != tx_count_old)
1288 		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1289 					    tx_count);
1290 	if (rx_count != rx_count_old)
1291 		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1292 					    rx_count);
1293 
1294 	if (lio_reset_queues(netdev, oct->num_iqs))
1295 		goto err_lio_reset_queues;
1296 
1297 	if (stopped)
1298 		netdev->netdev_ops->ndo_open(netdev);
1299 
1300 	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
1301 
1302 	return 0;
1303 
1304 err_lio_reset_queues:
1305 	if (tx_count != tx_count_old)
1306 		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1307 					    tx_count_old);
1308 	if (rx_count != rx_count_old)
1309 		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1310 					    rx_count_old);
1311 	return -EINVAL;
1312 }
1313 
1314 static u32 lio_get_msglevel(struct net_device *netdev)
1315 {
1316 	struct lio *lio = GET_LIO(netdev);
1317 
1318 	return lio->msg_enable;
1319 }
1320 
1321 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
1322 {
1323 	struct lio *lio = GET_LIO(netdev);
1324 
1325 	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
1326 		if (msglvl & NETIF_MSG_HW)
1327 			liquidio_set_feature(netdev,
1328 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
1329 		else
1330 			liquidio_set_feature(netdev,
1331 					     OCTNET_CMD_VERBOSE_DISABLE, 0);
1332 	}
1333 
1334 	lio->msg_enable = msglvl;
1335 }
1336 
1337 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
1338 {
1339 	struct lio *lio = GET_LIO(netdev);
1340 
1341 	lio->msg_enable = msglvl;
1342 }
1343 
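/* Pause-frame configuration: `ethtool -a eth0` / `ethtool -A eth0 rx on tx on`.
 * Autonegotiation of flow control is not supported.
 */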
1344 static void
1345 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1346 {
1347 	/* Notes: Not supporting any auto negotiation in these
1348 	 * drivers. Just report pause frame support.
1349 	 */
1350 	struct lio *lio = GET_LIO(netdev);
1351 	struct octeon_device *oct = lio->oct_dev;
1352 
1353 	pause->autoneg = 0;
1354 
1355 	pause->tx_pause = oct->tx_pause;
1356 	pause->rx_pause = oct->rx_pause;
1357 }
1358 
1359 static int
1360 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1361 {
1362 	/* Notes: Not supporting any auto negotiation in these
1363 	 * drivers.
1364 	 */
1365 	struct lio *lio = GET_LIO(netdev);
1366 	struct octeon_device *oct = lio->oct_dev;
1367 	struct octnic_ctrl_pkt nctrl;
1368 	struct oct_link_info *linfo = &lio->linfo;
1369 
1370 	int ret = 0;
1371 
1372 	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
1373 		return -EINVAL;
1374 
1375 	if (linfo->link.s.duplex == 0) {
1376 		/* no flow control for half duplex */
1377 		if (pause->rx_pause || pause->tx_pause)
1378 			return -EINVAL;
1379 	}
1380 
1381 	/* do not support autoneg of link flow control */
1382 	if (pause->autoneg == AUTONEG_ENABLE)
1383 		return -EINVAL;
1384 
1385 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1386 
1387 	nctrl.ncmd.u64 = 0;
1388 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
1389 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1390 	nctrl.netpndev = (u64)netdev;
1391 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1392 
1393 	if (pause->rx_pause) {
1394 		/* enable rx pause */
1395 		nctrl.ncmd.s.param1 = 1;
1396 	} else {
1397 		/* disable rx pause */
1398 		nctrl.ncmd.s.param1 = 0;
1399 	}
1400 
1401 	if (pause->tx_pause) {
1402 		/* enable tx pause */
1403 		nctrl.ncmd.s.param2 = 1;
1404 	} else {
1405 		/* disable tx pause */
1406 		nctrl.ncmd.s.param2 = 0;
1407 	}
1408 
1409 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1410 	if (ret) {
1411 		dev_err(&oct->pci_dev->dev,
1412 			"Failed to set pause parameter, ret=%d\n", ret);
1413 		return -EINVAL;
1414 	}
1415 
1416 	oct->rx_pause = pause->rx_pause;
1417 	oct->tx_pause = pause->tx_pause;
1418 
1419 	return 0;
1420 }
1421 
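/* `ethtool -S eth0` (PF). The order of values written to data[] must match
 * oct_stats_strings[], followed by oct_iq_stats_strings[] for each IQ and
 * oct_droq_stats_strings[] for each DROQ.
 */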
1422 static void
1423 lio_get_ethtool_stats(struct net_device *netdev,
1424 		      struct ethtool_stats *stats  __attribute__((unused)),
1425 		      u64 *data)
1426 {
1427 	struct lio *lio = GET_LIO(netdev);
1428 	struct octeon_device *oct_dev = lio->oct_dev;
1429 	struct rtnl_link_stats64 lstats;
1430 	int i = 0, j;
1431 
1432 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1433 		return;
1434 
1435 	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1436 	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
1437 	data[i++] = lstats.rx_packets;
1438 	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
1439 	data[i++] = lstats.tx_packets;
1440 	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
1441 	data[i++] = lstats.rx_bytes;
1442 	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1443 	data[i++] = lstats.tx_bytes;
1444 	data[i++] = lstats.rx_errors +
1445 			oct_dev->link_stats.fromwire.fcs_err +
1446 			oct_dev->link_stats.fromwire.jabber_err +
1447 			oct_dev->link_stats.fromwire.l2_err +
1448 			oct_dev->link_stats.fromwire.frame_err;
1449 	data[i++] = lstats.tx_errors;
1450 	/*sum of oct->droq[oq_no]->stats->rx_dropped +
1451 	 *oct->droq[oq_no]->stats->dropped_nodispatch +
1452 	 *oct->droq[oq_no]->stats->dropped_toomany +
1453 	 *oct->droq[oq_no]->stats->dropped_nomem
1454 	 */
1455 	data[i++] = lstats.rx_dropped +
1456 			oct_dev->link_stats.fromwire.fifo_err +
1457 			oct_dev->link_stats.fromwire.dmac_drop +
1458 			oct_dev->link_stats.fromwire.red_drops +
1459 			oct_dev->link_stats.fromwire.fw_err_pko +
1460 			oct_dev->link_stats.fromwire.fw_err_link +
1461 			oct_dev->link_stats.fromwire.fw_err_drop;
1462 	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1463 	data[i++] = lstats.tx_dropped +
1464 			oct_dev->link_stats.fromhost.max_collision_fail +
1465 			oct_dev->link_stats.fromhost.max_deferral_fail +
1466 			oct_dev->link_stats.fromhost.total_collisions +
1467 			oct_dev->link_stats.fromhost.fw_err_pko +
1468 			oct_dev->link_stats.fromhost.fw_err_link +
1469 			oct_dev->link_stats.fromhost.fw_err_drop +
1470 			oct_dev->link_stats.fromhost.fw_err_pki;
1471 
1472 	/* firmware tx stats */
1473 	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
1474 	 *fromhost.fw_total_sent
1475 	 */
1476 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
1477 	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
1478 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
1479 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
1480 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
1481 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
1482 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
1483 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
1484 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
1485 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1486 	 *fw_err_drop
1487 	 */
1488 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
1489 
1490 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
1491 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
1492 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1493 	 *fw_tso_fwd
1494 	 */
1495 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
1496 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1497 	 *fw_err_tso
1498 	 */
1499 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
1500 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1501 	 *fw_tx_vxlan
1502 	 */
1503 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
1504 
1505 	/* Multicast packets sent by this port */
1506 	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1507 	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1508 
1509 	/* mac tx statistics */
1510 	/*CVMX_BGXX_CMRX_TX_STAT5 */
1511 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
1512 	/*CVMX_BGXX_CMRX_TX_STAT4 */
1513 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
1514 	/*CVMX_BGXX_CMRX_TX_STAT15 */
1515 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
1516 	/*CVMX_BGXX_CMRX_TX_STAT14 */
1517 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
1518 	/*CVMX_BGXX_CMRX_TX_STAT17 */
1519 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
1520 	/*CVMX_BGXX_CMRX_TX_STAT0 */
1521 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
1522 	/*CVMX_BGXX_CMRX_TX_STAT3 */
1523 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
1524 	/*CVMX_BGXX_CMRX_TX_STAT2 */
1525 	data[i++] =
1526 		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
1527 	/*CVMX_BGXX_CMRX_TX_STAT0 */
1528 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
1529 	/*CVMX_BGXX_CMRX_TX_STAT1 */
1530 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
1531 	/*CVMX_BGXX_CMRX_TX_STAT16 */
1532 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
1533 	/*CVMX_BGXX_CMRX_TX_STAT6 */
1534 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
1535 
1536 	/* RX firmware stats */
1537 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1538 	 *fw_total_rcvd
1539 	 */
1540 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
1541 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1542 	 *fw_total_fwd
1543 	 */
1544 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
1545 	/* Multicast packets received on this port */
1546 	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1547 	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1548 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
1549 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
1550 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
1551 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
1552 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
1553 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
1554 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1555 	 *fw_err_pko
1556 	 */
1557 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
1558 	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
1559 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
1560 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1561 	 *fromwire.fw_err_drop
1562 	 */
1563 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
1564 
1565 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1566 	 *fromwire.fw_rx_vxlan
1567 	 */
1568 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
1569 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1570 	 *fromwire.fw_rx_vxlan_err
1571 	 */
1572 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
1573 
1574 	/* LRO */
1575 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1576 	 *fw_lro_pkts
1577 	 */
1578 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
1579 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1580 	 *fw_lro_octs
1581 	 */
1582 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
1583 	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
1584 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
1585 	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
1586 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
1587 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1588 	 *fw_lro_aborts_port
1589 	 */
1590 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
1591 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1592 	 *fw_lro_aborts_seq
1593 	 */
1594 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
1595 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1596 	 *fw_lro_aborts_tsval
1597 	 */
1598 	data[i++] =
1599 		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
1600 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1601 	 *fw_lro_aborts_timer
1602 	 */
1603 	data[i++] =
1604 		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
1605 
1606 	/* intrmod: packet forward rate */
1607 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
1608 
1609 	/* mac: link-level stats */
1610 	/*CVMX_BGXX_CMRX_RX_STAT0 */
1611 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
1612 	/*CVMX_BGXX_CMRX_RX_STAT1 */
1613 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
1614 	/*CVMX_PKI_STATX_STAT5 */
1615 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
1616 	/*CVMX_PKI_STATX_STAT5 */
1617 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
1618 	/*wqe->word2.err_code or wqe->word2.err_level */
1619 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
1620 	/*CVMX_BGXX_CMRX_RX_STAT2 */
1621 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
1622 	/*CVMX_BGXX_CMRX_RX_STAT6 */
1623 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
1624 	/*CVMX_BGXX_CMRX_RX_STAT4 */
1625 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
1626 	/*wqe->word2.err_code or wqe->word2.err_level */
1627 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
1628 	/*lio->link_changes*/
1629 	data[i++] = CVM_CAST64(lio->link_changes);
1630 
1631 	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
1632 		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
1633 			continue;
1634 		/*packets to network port*/
1635 		/*# of packets tx to network */
1636 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1637 		/*# of bytes tx to network */
1638 		data[i++] =
1639 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1640 		/*# of packets dropped */
1641 		data[i++] =
1642 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
1643 		/*# of tx fails due to queue full */
1644 		data[i++] =
1645 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
1646 		/*XXX gather entries sent */
1647 		data[i++] =
1648 			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
1649 
1650 		/*instruction to firmware: data and control */
1651 		/*# of instructions to the queue */
1652 		data[i++] =
1653 			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
1654 		/*# of instructions processed */
1655 		data[i++] = CVM_CAST64(
1656 				oct_dev->instr_queue[j]->stats.instr_processed);
1657 		/*# of instructions could not be processed */
1658 		data[i++] = CVM_CAST64(
1659 				oct_dev->instr_queue[j]->stats.instr_dropped);
1660 		/*bytes sent through the queue */
1661 		data[i++] =
1662 			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
1663 
1664 		/*tso request*/
1665 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1666 		/*vxlan request*/
1667 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1668 		/*txq restart*/
1669 		data[i++] =
1670 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
1671 	}
1672 
1673 	/* RX */
1674 	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
1675 		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
1676 			continue;
1677 
1678 		/* packets sent to the TCP/IP network stack */
1679 		/*# of packets to network stack */
1680 		data[i++] =
1681 			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
1682 		/*# of bytes to network stack */
1683 		data[i++] =
1684 			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
1685 		/*# of packets dropped */
1686 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1687 				       oct_dev->droq[j]->stats.dropped_toomany +
1688 				       oct_dev->droq[j]->stats.rx_dropped);
1689 		data[i++] =
1690 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1691 		data[i++] =
1692 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1693 		data[i++] =
1694 			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1695 
1696 		/*control and data path*/
1697 		data[i++] =
1698 			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1699 		data[i++] =
1700 			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1701 		data[i++] =
1702 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1703 
1704 		data[i++] =
1705 			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1706 		data[i++] =
1707 			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1708 	}
1709 }
1710 
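/* `ethtool -S eth0` (VF): same layout contract, against
 * oct_vf_stats_strings[] and the per-queue string tables.
 */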
1711 static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1712 				     struct ethtool_stats *stats
1713 				     __attribute__((unused)),
1714 				     u64 *data)
1715 {
1716 	struct rtnl_link_stats64 lstats;
1717 	struct lio *lio = GET_LIO(netdev);
1718 	struct octeon_device *oct_dev = lio->oct_dev;
1719 	int i = 0, j, vj;
1720 
1721 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1722 		return;
1723 
1724 	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1725 	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1726 	data[i++] = lstats.rx_packets;
1727 	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
1728 	data[i++] = lstats.tx_packets;
1729 	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1730 	data[i++] = lstats.rx_bytes;
1731 	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1732 	data[i++] = lstats.tx_bytes;
1733 	data[i++] = lstats.rx_errors;
1734 	data[i++] = lstats.tx_errors;
1735 	 /* sum of oct->droq[oq_no]->stats->rx_dropped +
1736 	  * oct->droq[oq_no]->stats->dropped_nodispatch +
1737 	  * oct->droq[oq_no]->stats->dropped_toomany +
1738 	  * oct->droq[oq_no]->stats->dropped_nomem
1739 	  */
1740 	data[i++] = lstats.rx_dropped;
1741 	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1742 	data[i++] = lstats.tx_dropped +
1743 		oct_dev->link_stats.fromhost.fw_err_drop;
1744 
1745 	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1746 	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1747 	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1748 	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1749 
1750 	/* lio->link_changes */
1751 	data[i++] = CVM_CAST64(lio->link_changes);
1752 
1753 	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
1754 		j = lio->linfo.txpciq[vj].s.q_no;
1755 
1756 		/* packets to network port */
1757 		/* # of packets tx to network */
1758 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
1760 		data[i++] = CVM_CAST64(
1761 				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1762 		/* # of packets dropped */
1763 		data[i++] = CVM_CAST64(
1764 				oct_dev->instr_queue[j]->stats.tx_dropped);
1765 		/* # of tx fails due to queue full */
1766 		data[i++] = CVM_CAST64(
1767 				oct_dev->instr_queue[j]->stats.tx_iq_busy);
1768 		/* XXX gather entries sent */
1769 		data[i++] = CVM_CAST64(
1770 				oct_dev->instr_queue[j]->stats.sgentry_sent);
1771 
1772 		/* instruction to firmware: data and control */
1773 		/* # of instructions to the queue */
1774 		data[i++] = CVM_CAST64(
1775 				oct_dev->instr_queue[j]->stats.instr_posted);
1776 		/* # of instructions processed */
1777 		data[i++] =
1778 		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
1780 		data[i++] =
1781 		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1782 		/* bytes sent through the queue */
1783 		data[i++] = CVM_CAST64(
1784 				oct_dev->instr_queue[j]->stats.bytes_sent);
1785 		/* tso request */
1786 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1787 		/* vxlan request */
1788 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1789 		/* txq restart */
1790 		data[i++] = CVM_CAST64(
1791 				oct_dev->instr_queue[j]->stats.tx_restart);
1792 	}
1793 
1794 	/* RX */
1795 	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
1796 		j = lio->linfo.rxpciq[vj].s.q_no;
1797 
		/* packets sent to the TCP/IP network stack */
1799 		/* # of packets to network stack */
1800 		data[i++] = CVM_CAST64(
1801 				oct_dev->droq[j]->stats.rx_pkts_received);
1802 		/* # of bytes to network stack */
1803 		data[i++] = CVM_CAST64(
1804 				oct_dev->droq[j]->stats.rx_bytes_received);
1805 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1806 				       oct_dev->droq[j]->stats.dropped_toomany +
1807 				       oct_dev->droq[j]->stats.rx_dropped);
1808 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1809 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1810 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1811 
1812 		/* control and data path */
1813 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1814 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1815 		data[i++] =
1816 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1817 
1818 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1819 		data[i++] =
1820 		    CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1821 	}
1822 }
1823 
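/* Private-flag names exist only on CN23xx parts; the older CN66xx and
 * CN68xx silently emit nothing, which matches the -EOPNOTSUPP count
 * returned by lio_get_priv_flags_ss_count() further down.
 */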
1824 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1825 {
1826 	struct octeon_device *oct_dev = lio->oct_dev;
1827 	int i;
1828 
1829 	switch (oct_dev->chip_id) {
1830 	case OCTEON_CN23XX_PF_VID:
1831 	case OCTEON_CN23XX_VF_VID:
1832 		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1833 			sprintf(data, "%s", oct_priv_flags_strings[i]);
1834 			data += ETH_GSTRING_LEN;
1835 		}
1836 		break;
1837 	case OCTEON_CN68XX:
1838 	case OCTEON_CN66XX:
1839 		break;
1840 	default:
1841 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1842 		break;
1843 	}
1844 }
1845 
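/* Build the ETH_SS_STATS name table consumed by "ethtool -S <iface>".
 * Order is the contract here: the flat oct_stats_strings[] names come
 * first, then "tx-<q>-<stat>" for every queue enabled in io_qmask.iq,
 * then "rx-<q>-<stat>" for every queue in io_qmask.oq, mirroring
 * exactly how lio_get_ethtool_stats() fills data[].
 */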
1846 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1847 {
1848 	struct lio *lio = GET_LIO(netdev);
1849 	struct octeon_device *oct_dev = lio->oct_dev;
1850 	int num_iq_stats, num_oq_stats, i, j;
1851 	int num_stats;
1852 
1853 	switch (stringset) {
1854 	case ETH_SS_STATS:
1855 		num_stats = ARRAY_SIZE(oct_stats_strings);
1856 		for (j = 0; j < num_stats; j++) {
1857 			sprintf(data, "%s", oct_stats_strings[j]);
1858 			data += ETH_GSTRING_LEN;
1859 		}
1860 
1861 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1862 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1863 			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1864 				continue;
1865 			for (j = 0; j < num_iq_stats; j++) {
1866 				sprintf(data, "tx-%d-%s", i,
1867 					oct_iq_stats_strings[j]);
1868 				data += ETH_GSTRING_LEN;
1869 			}
1870 		}
1871 
1872 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1873 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1874 			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1875 				continue;
1876 			for (j = 0; j < num_oq_stats; j++) {
1877 				sprintf(data, "rx-%d-%s", i,
1878 					oct_droq_stats_strings[j]);
1879 				data += ETH_GSTRING_LEN;
1880 			}
1881 		}
1882 		break;
1883 
1884 	case ETH_SS_PRIV_FLAGS:
1885 		lio_get_priv_flags_strings(lio, data);
1886 		break;
1887 	default:
1888 		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1889 		break;
1890 	}
1891 }
1892 
1893 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1894 			       u8 *data)
1895 {
1896 	int num_iq_stats, num_oq_stats, i, j;
1897 	struct lio *lio = GET_LIO(netdev);
1898 	struct octeon_device *oct_dev = lio->oct_dev;
1899 	int num_stats;
1900 
1901 	switch (stringset) {
1902 	case ETH_SS_STATS:
1903 		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1904 		for (j = 0; j < num_stats; j++) {
1905 			sprintf(data, "%s", oct_vf_stats_strings[j]);
1906 			data += ETH_GSTRING_LEN;
1907 		}
1908 
1909 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1910 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1911 			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1912 				continue;
1913 			for (j = 0; j < num_iq_stats; j++) {
1914 				sprintf(data, "tx-%d-%s", i,
1915 					oct_iq_stats_strings[j]);
1916 				data += ETH_GSTRING_LEN;
1917 			}
1918 		}
1919 
1920 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1921 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1922 			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1923 				continue;
1924 			for (j = 0; j < num_oq_stats; j++) {
1925 				sprintf(data, "rx-%d-%s", i,
1926 					oct_droq_stats_strings[j]);
1927 				data += ETH_GSTRING_LEN;
1928 			}
1929 		}
1930 		break;
1931 
1932 	case ETH_SS_PRIV_FLAGS:
1933 		lio_get_priv_flags_strings(lio, data);
1934 		break;
1935 	default:
1936 		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1937 		break;
1938 	}
1939 }
1940 
1941 static int lio_get_priv_flags_ss_count(struct lio *lio)
1942 {
1943 	struct octeon_device *oct_dev = lio->oct_dev;
1944 
1945 	switch (oct_dev->chip_id) {
1946 	case OCTEON_CN23XX_PF_VID:
1947 	case OCTEON_CN23XX_VF_VID:
1948 		return ARRAY_SIZE(oct_priv_flags_strings);
1949 	case OCTEON_CN68XX:
1950 	case OCTEON_CN66XX:
1951 		return -EOPNOTSUPP;
1952 	default:
1953 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1954 		return -EOPNOTSUPP;
1955 	}
1956 }
1957 
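/* The count returned here sizes both the string table and the data
 * array, so it must be the same arithmetic the fill routines use:
 * flat stats + per-IQ stats * num_iqs + per-OQ stats * num_oqs.
 */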
1958 static int lio_get_sset_count(struct net_device *netdev, int sset)
1959 {
1960 	struct lio *lio = GET_LIO(netdev);
1961 	struct octeon_device *oct_dev = lio->oct_dev;
1962 
1963 	switch (sset) {
1964 	case ETH_SS_STATS:
1965 		return (ARRAY_SIZE(oct_stats_strings) +
1966 			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1967 			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1968 	case ETH_SS_PRIV_FLAGS:
1969 		return lio_get_priv_flags_ss_count(lio);
1970 	default:
1971 		return -EOPNOTSUPP;
1972 	}
1973 }
1974 
1975 static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1976 {
1977 	struct lio *lio = GET_LIO(netdev);
1978 	struct octeon_device *oct_dev = lio->oct_dev;
1979 
1980 	switch (sset) {
1981 	case ETH_SS_STATS:
1982 		return (ARRAY_SIZE(oct_vf_stats_strings) +
1983 			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1984 			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1985 	case ETH_SS_PRIV_FLAGS:
1986 		return lio_get_priv_flags_ss_count(lio);
1987 	default:
1988 		return -EOPNOTSUPP;
1989 	}
1990 }
1991 
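/* The get/set helpers below share one request/response pattern: build
 * a soft command (optionally with a response buffer), queue it on
 * IQ 0, then block on sc->complete until the firmware answers or the
 * request times out.  Whoever consumes the result must set
 * caller_is_done so the completion path can release the command
 * buffers.
 */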
/* Get interrupt moderation parameters */
1993 static int octnet_get_intrmod_cfg(struct lio *lio,
1994 				  struct oct_intrmod_cfg *intr_cfg)
1995 {
1996 	struct octeon_soft_command *sc;
1997 	struct oct_intrmod_resp *resp;
1998 	int retval;
1999 	struct octeon_device *oct_dev = lio->oct_dev;
2000 
2001 	/* Alloc soft command */
2002 	sc = (struct octeon_soft_command *)
2003 		octeon_alloc_soft_command(oct_dev,
2004 					  0,
2005 					  sizeof(struct oct_intrmod_resp), 0);
2006 
2007 	if (!sc)
2008 		return -ENOMEM;
2009 
2010 	resp = (struct oct_intrmod_resp *)sc->virtrptr;
2011 	memset(resp, 0, sizeof(struct oct_intrmod_resp));
2012 
2013 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2014 
2015 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
2016 				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
2017 
2018 	init_completion(&sc->complete);
2019 	sc->sc_status = OCTEON_REQUEST_PENDING;
2020 
2021 	retval = octeon_send_soft_command(oct_dev, sc);
2022 	if (retval == IQ_SEND_FAILED) {
2023 		octeon_free_soft_command(oct_dev, sc);
2024 		return -EINVAL;
2025 	}
2026 
	/* Block on the completion until the firmware response arrives
	 * or the request times out.
	 */
2030 	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2031 	if (retval)
2032 		return -ENODEV;
2033 
2034 	if (resp->status) {
2035 		dev_err(&oct_dev->pci_dev->dev,
2036 			"Get interrupt moderation parameters failed\n");
2037 		WRITE_ONCE(sc->caller_is_done, true);
2038 		return -ENODEV;
2039 	}
2040 
2041 	octeon_swap_8B_data((u64 *)&resp->intrmod,
2042 			    (sizeof(struct oct_intrmod_cfg)) / 8);
2043 	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
2044 	WRITE_ONCE(sc->caller_is_done, true);
2045 
2046 	return 0;
2047 }
2048 
/* Configure interrupt moderation parameters */
2050 static int octnet_set_intrmod_cfg(struct lio *lio,
2051 				  struct oct_intrmod_cfg *intr_cfg)
2052 {
2053 	struct octeon_soft_command *sc;
2054 	struct oct_intrmod_cfg *cfg;
2055 	int retval;
2056 	struct octeon_device *oct_dev = lio->oct_dev;
2057 
2058 	/* Alloc soft command */
2059 	sc = (struct octeon_soft_command *)
2060 		octeon_alloc_soft_command(oct_dev,
2061 					  sizeof(struct oct_intrmod_cfg),
2062 					  16, 0);
2063 
2064 	if (!sc)
2065 		return -ENOMEM;
2066 
2067 	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
2068 
2069 	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
2070 	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
2071 
2072 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2073 
2074 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
2075 				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
2076 
2077 	init_completion(&sc->complete);
2078 	sc->sc_status = OCTEON_REQUEST_PENDING;
2079 
2080 	retval = octeon_send_soft_command(oct_dev, sc);
2081 	if (retval == IQ_SEND_FAILED) {
2082 		octeon_free_soft_command(oct_dev, sc);
2083 		return -EINVAL;
2084 	}
2085 
	/* Block on the completion until the firmware response arrives
	 * or the request times out.
	 */
2089 	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2090 	if (retval)
2091 		return retval;
2092 
2093 	retval = sc->sc_status;
2094 	if (retval == 0) {
2095 		dev_info(&oct_dev->pci_dev->dev,
2096 			 "Rx-Adaptive Interrupt moderation %s\n",
2097 			 (intr_cfg->rx_enable) ?
2098 			 "enabled" : "disabled");
2099 		WRITE_ONCE(sc->caller_is_done, true);
2100 		return 0;
2101 	}
2102 
2103 	dev_err(&oct_dev->pci_dev->dev,
2104 		"intrmod config failed. Status: %x\n", retval);
2105 	WRITE_ONCE(sc->caller_is_done, true);
2106 	return -ENODEV;
2107 }
2108 
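/* "ethtool -c <iface>" lands here.  Static rx/tx values are reported
 * only for the directions where adaptive moderation is off; when it is
 * on, the adaptive trigger thresholds from the firmware's
 * oct_intrmod_cfg are mapped onto the corresponding ethtool fields
 * instead.
 */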
2109 static int lio_get_intr_coalesce(struct net_device *netdev,
2110 				 struct ethtool_coalesce *intr_coal)
2111 {
2112 	struct lio *lio = GET_LIO(netdev);
2113 	struct octeon_device *oct = lio->oct_dev;
2114 	struct octeon_instr_queue *iq;
2115 	struct oct_intrmod_cfg intrmod_cfg;
2116 
2117 	if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
2118 		return -ENODEV;
2119 
2120 	switch (oct->chip_id) {
2121 	case OCTEON_CN23XX_PF_VID:
2122 	case OCTEON_CN23XX_VF_VID: {
2123 		if (!intrmod_cfg.rx_enable) {
2124 			intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
2125 			intr_coal->rx_max_coalesced_frames =
2126 				oct->rx_max_coalesced_frames;
2127 		}
2128 		if (!intrmod_cfg.tx_enable)
2129 			intr_coal->tx_max_coalesced_frames =
2130 				oct->tx_max_coalesced_frames;
2131 		break;
2132 	}
2133 	case OCTEON_CN68XX:
2134 	case OCTEON_CN66XX: {
2135 		struct octeon_cn6xxx *cn6xxx =
2136 			(struct octeon_cn6xxx *)oct->chip;
2137 
2138 		if (!intrmod_cfg.rx_enable) {
2139 			intr_coal->rx_coalesce_usecs =
2140 				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
2141 			intr_coal->rx_max_coalesced_frames =
2142 				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
2143 		}
2144 		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
2145 		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
2146 		break;
2147 	}
2148 	default:
2149 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
2150 		return -EINVAL;
2151 	}
2152 	if (intrmod_cfg.rx_enable) {
2153 		intr_coal->use_adaptive_rx_coalesce =
2154 			intrmod_cfg.rx_enable;
2155 		intr_coal->rate_sample_interval =
2156 			intrmod_cfg.check_intrvl;
2157 		intr_coal->pkt_rate_high =
2158 			intrmod_cfg.maxpkt_ratethr;
2159 		intr_coal->pkt_rate_low =
2160 			intrmod_cfg.minpkt_ratethr;
2161 		intr_coal->rx_max_coalesced_frames_high =
2162 			intrmod_cfg.rx_maxcnt_trigger;
2163 		intr_coal->rx_coalesce_usecs_high =
2164 			intrmod_cfg.rx_maxtmr_trigger;
2165 		intr_coal->rx_coalesce_usecs_low =
2166 			intrmod_cfg.rx_mintmr_trigger;
2167 		intr_coal->rx_max_coalesced_frames_low =
2168 			intrmod_cfg.rx_mincnt_trigger;
2169 	}
2170 	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
2171 	    (intrmod_cfg.tx_enable)) {
2172 		intr_coal->use_adaptive_tx_coalesce =
2173 			intrmod_cfg.tx_enable;
2174 		intr_coal->tx_max_coalesced_frames_high =
2175 			intrmod_cfg.tx_maxcnt_trigger;
2176 		intr_coal->tx_max_coalesced_frames_low =
2177 			intrmod_cfg.tx_mincnt_trigger;
2178 	}
2179 	return 0;
2180 }
2181 
/* Enable/disable adaptive interrupt moderation */
2183 static int oct_cfg_adaptive_intr(struct lio *lio,
2184 				 struct oct_intrmod_cfg *intrmod_cfg,
2185 				 struct ethtool_coalesce *intr_coal)
2186 {
2187 	int ret = 0;
2188 
2189 	if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
2190 		intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
2191 		intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
2192 		intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
2193 	}
2194 	if (intrmod_cfg->rx_enable) {
2195 		intrmod_cfg->rx_maxcnt_trigger =
2196 			intr_coal->rx_max_coalesced_frames_high;
2197 		intrmod_cfg->rx_maxtmr_trigger =
2198 			intr_coal->rx_coalesce_usecs_high;
2199 		intrmod_cfg->rx_mintmr_trigger =
2200 			intr_coal->rx_coalesce_usecs_low;
2201 		intrmod_cfg->rx_mincnt_trigger =
2202 			intr_coal->rx_max_coalesced_frames_low;
2203 	}
2204 	if (intrmod_cfg->tx_enable) {
2205 		intrmod_cfg->tx_maxcnt_trigger =
2206 			intr_coal->tx_max_coalesced_frames_high;
2207 		intrmod_cfg->tx_mincnt_trigger =
2208 			intr_coal->tx_max_coalesced_frames_low;
2209 	}
2210 
2211 	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
2212 
2213 	return ret;
2214 }
2215 
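/* Program the static per-OQ packet-count threshold.  On CN23xx this is
 * a read-modify-write of SLI_OQ_PKT_INT_LEVELS: the mask preserves the
 * time threshold living in the upper bits while only the (frames - 1)
 * count in the low bits is replaced.
 */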
2216 static int
2217 oct_cfg_rx_intrcnt(struct lio *lio,
2218 		   struct oct_intrmod_cfg *intrmod,
2219 		   struct ethtool_coalesce *intr_coal)
2220 {
2221 	struct octeon_device *oct = lio->oct_dev;
2222 	u32 rx_max_coalesced_frames;
2223 
	/* Configure count-based interrupt thresholds */
2225 	switch (oct->chip_id) {
2226 	case OCTEON_CN68XX:
2227 	case OCTEON_CN66XX: {
2228 		struct octeon_cn6xxx *cn6xxx =
2229 			(struct octeon_cn6xxx *)oct->chip;
2230 
2231 		if (!intr_coal->rx_max_coalesced_frames)
2232 			rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
2233 		else
2234 			rx_max_coalesced_frames =
2235 				intr_coal->rx_max_coalesced_frames;
2236 		octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
2237 				 rx_max_coalesced_frames);
2238 		CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
2239 		break;
2240 	}
2241 	case OCTEON_CN23XX_PF_VID: {
2242 		int q_no;
2243 
2244 		if (!intr_coal->rx_max_coalesced_frames)
2245 			rx_max_coalesced_frames = intrmod->rx_frames;
2246 		else
2247 			rx_max_coalesced_frames =
2248 			    intr_coal->rx_max_coalesced_frames;
2249 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2250 			q_no += oct->sriov_info.pf_srn;
2251 			octeon_write_csr64(
2252 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2253 			    (octeon_read_csr64(
2254 				 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2255 			     (0x3fffff00000000UL)) |
2256 				(rx_max_coalesced_frames - 1));
			/* consider setting the resend bit */
2258 		}
2259 		intrmod->rx_frames = rx_max_coalesced_frames;
2260 		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2261 		break;
2262 	}
2263 	case OCTEON_CN23XX_VF_VID: {
2264 		int q_no;
2265 
2266 		if (!intr_coal->rx_max_coalesced_frames)
2267 			rx_max_coalesced_frames = intrmod->rx_frames;
2268 		else
2269 			rx_max_coalesced_frames =
2270 			    intr_coal->rx_max_coalesced_frames;
2271 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2272 			octeon_write_csr64(
2273 			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2274 			    (octeon_read_csr64(
2275 				 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2276 			     (0x3fffff00000000UL)) |
2277 				(rx_max_coalesced_frames - 1));
			/* consider writing to the resend bit here */
2279 		}
2280 		intrmod->rx_frames = rx_max_coalesced_frames;
2281 		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2282 		break;
2283 	}
2284 	default:
2285 		return -EINVAL;
2286 	}
2287 	return 0;
2288 }
2289 
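/* Program the static per-OQ time threshold.  User-supplied usecs are
 * first converted to chip ticks (lio_cn6xxx_get_oq_ticks() or the
 * cn23xx equivalents) and, on CN23xx, written into the upper 32 bits
 * of SLI_OQ_PKT_INT_LEVELS alongside the current frame count.
 */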
2290 static int oct_cfg_rx_intrtime(struct lio *lio,
2291 			       struct oct_intrmod_cfg *intrmod,
2292 			       struct ethtool_coalesce *intr_coal)
2293 {
2294 	struct octeon_device *oct = lio->oct_dev;
2295 	u32 time_threshold, rx_coalesce_usecs;
2296 
	/* Configure time-based interrupt thresholds */
2298 	switch (oct->chip_id) {
2299 	case OCTEON_CN68XX:
2300 	case OCTEON_CN66XX: {
2301 		struct octeon_cn6xxx *cn6xxx =
2302 			(struct octeon_cn6xxx *)oct->chip;
2303 		if (!intr_coal->rx_coalesce_usecs)
2304 			rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
2305 		else
2306 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2307 
2308 		time_threshold = lio_cn6xxx_get_oq_ticks(oct,
2309 							 rx_coalesce_usecs);
2310 		octeon_write_csr(oct,
2311 				 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
2312 				 time_threshold);
2313 
2314 		CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
2315 		break;
2316 	}
2317 	case OCTEON_CN23XX_PF_VID: {
2318 		u64 time_threshold;
2319 		int q_no;
2320 
2321 		if (!intr_coal->rx_coalesce_usecs)
2322 			rx_coalesce_usecs = intrmod->rx_usecs;
2323 		else
2324 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2325 		time_threshold =
2326 		    cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2327 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2328 			q_no += oct->sriov_info.pf_srn;
2329 			octeon_write_csr64(oct,
2330 					   CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2331 					   (intrmod->rx_frames |
2332 					    ((u64)time_threshold << 32)));
			/* consider writing to the resend bit here */
2334 		}
2335 		intrmod->rx_usecs = rx_coalesce_usecs;
2336 		oct->rx_coalesce_usecs = rx_coalesce_usecs;
2337 		break;
2338 	}
2339 	case OCTEON_CN23XX_VF_VID: {
2340 		u64 time_threshold;
2341 		int q_no;
2342 
2343 		if (!intr_coal->rx_coalesce_usecs)
2344 			rx_coalesce_usecs = intrmod->rx_usecs;
2345 		else
2346 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2347 
2348 		time_threshold =
2349 		    cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2350 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2351 			octeon_write_csr64(
2352 				oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2353 				(intrmod->rx_frames |
2354 				 ((u64)time_threshold << 32)));
			/* consider setting the resend bit */
2356 		}
2357 		intrmod->rx_usecs = rx_coalesce_usecs;
2358 		oct->rx_coalesce_usecs = rx_coalesce_usecs;
2359 		break;
2360 	}
2361 	default:
2362 		return -EINVAL;
2363 	}
2364 
2365 	return 0;
2366 }
2367 
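/* TX-side count threshold.  CN23xx keeps the watermark inside the
 * instruction-count CSR, so only the wmark field is rewritten and the
 * live count bits are left alone.  CN6xxx is a no-op here; its TX
 * coalescing is the iq->fill_threshold handled directly in
 * lio_set_intr_coalesce().
 */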
2368 static int
2369 oct_cfg_tx_intrcnt(struct lio *lio,
2370 		   struct oct_intrmod_cfg *intrmod,
2371 		   struct ethtool_coalesce *intr_coal)
2372 {
2373 	struct octeon_device *oct = lio->oct_dev;
2374 	u32 iq_intr_pkt;
2375 	void __iomem *inst_cnt_reg;
2376 	u64 val;
2377 
	/* Configure count-based interrupt thresholds */
2379 	switch (oct->chip_id) {
2380 	case OCTEON_CN68XX:
2381 	case OCTEON_CN66XX:
2382 		break;
2383 	case OCTEON_CN23XX_VF_VID:
2384 	case OCTEON_CN23XX_PF_VID: {
2385 		int q_no;
2386 
2387 		if (!intr_coal->tx_max_coalesced_frames)
2388 			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
2389 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
2390 		else
2391 			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
2392 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
2393 		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
2394 			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
2395 			val = readq(inst_cnt_reg);
			/* clear wmark and count; don't write count back */
2397 			val = (val & 0xFFFF000000000000ULL) |
2398 			      ((u64)(iq_intr_pkt - 1)
2399 			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
2400 			writeq(val, inst_cnt_reg);
			/* consider setting the resend bit */
2402 		}
2403 		intrmod->tx_frames = iq_intr_pkt;
2404 		oct->tx_max_coalesced_frames = iq_intr_pkt;
2405 		break;
2406 	}
2407 	default:
2408 		return -EINVAL;
2409 	}
2410 	return 0;
2411 }
2412 
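/* "ethtool -C <iface>" entry point, e.g. something like:
 *
 *   ethtool -C eth0 adaptive-rx on adaptive-tx on
 *   ethtool -C eth0 rx-usecs 64 rx-frames 32
 *
 * Adaptive settings are pushed to the firmware first; static rx/tx
 * thresholds are then programmed only for the directions that are not
 * adaptive, with the adaptive sides falling back to the driver
 * configuration defaults.
 */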
2413 static int lio_set_intr_coalesce(struct net_device *netdev,
2414 				 struct ethtool_coalesce *intr_coal)
2415 {
2416 	struct lio *lio = GET_LIO(netdev);
2417 	int ret;
2418 	struct octeon_device *oct = lio->oct_dev;
2419 	struct oct_intrmod_cfg intrmod = {0};
2420 	u32 j, q_no;
2421 	int db_max, db_min;
2422 
2423 	switch (oct->chip_id) {
2424 	case OCTEON_CN68XX:
2425 	case OCTEON_CN66XX:
2426 		db_min = CN6XXX_DB_MIN;
2427 		db_max = CN6XXX_DB_MAX;
2428 		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
2429 		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
2430 			for (j = 0; j < lio->linfo.num_txpciq; j++) {
2431 				q_no = lio->linfo.txpciq[j].s.q_no;
2432 				oct->instr_queue[q_no]->fill_threshold =
2433 					intr_coal->tx_max_coalesced_frames;
2434 			}
2435 		} else {
2436 			dev_err(&oct->pci_dev->dev,
2437 				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
2438 				intr_coal->tx_max_coalesced_frames,
2439 				db_min, db_max);
2440 			return -EINVAL;
2441 		}
2442 		break;
2443 	case OCTEON_CN23XX_PF_VID:
2444 	case OCTEON_CN23XX_VF_VID:
2445 		break;
2446 	default:
2447 		return -EINVAL;
2448 	}
2449 
2450 	intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
2451 	intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
2452 	intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2453 	intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2454 	intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2455 
2456 	ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
2457 
2458 	if (!intr_coal->use_adaptive_rx_coalesce) {
2459 		ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
2460 		if (ret)
2461 			goto ret_intrmod;
2462 
2463 		ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
2464 		if (ret)
2465 			goto ret_intrmod;
2466 	} else {
2467 		oct->rx_coalesce_usecs =
2468 			CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2469 		oct->rx_max_coalesced_frames =
2470 			CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2471 	}
2472 
2473 	if (!intr_coal->use_adaptive_tx_coalesce) {
2474 		ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2475 		if (ret)
2476 			goto ret_intrmod;
2477 	} else {
2478 		oct->tx_max_coalesced_frames =
2479 			CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2480 	}
2481 
2482 	return 0;
2483 ret_intrmod:
2484 	return ret;
2485 }
2486 
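/* Timestamping capabilities for "ethtool -T <iface>".  Hardware
 * timestamp types and RX filters are only advertised when the driver
 * is built with PTP_HARDWARE_TIMESTAMPING; the PHC index is reported
 * whenever a PTP clock was registered, -1 otherwise.
 */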
2487 static int lio_get_ts_info(struct net_device *netdev,
2488 			   struct ethtool_ts_info *info)
2489 {
2490 	struct lio *lio = GET_LIO(netdev);
2491 
2492 	info->so_timestamping =
2493 #ifdef PTP_HARDWARE_TIMESTAMPING
2494 		SOF_TIMESTAMPING_TX_HARDWARE |
2495 		SOF_TIMESTAMPING_RX_HARDWARE |
2496 		SOF_TIMESTAMPING_RAW_HARDWARE |
2497 		SOF_TIMESTAMPING_TX_SOFTWARE |
2498 #endif
2499 		SOF_TIMESTAMPING_RX_SOFTWARE |
2500 		SOF_TIMESTAMPING_SOFTWARE;
2501 
2502 	if (lio->ptp_clock)
2503 		info->phc_index = ptp_clock_index(lio->ptp_clock);
2504 	else
2505 		info->phc_index = -1;
2506 
2507 #ifdef PTP_HARDWARE_TIMESTAMPING
2508 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2509 
2510 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2511 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2512 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2513 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2514 #endif
2515 
2516 	return 0;
2517 }
2518 
2519 /* Return register dump len. */
2520 static int lio_get_regs_len(struct net_device *dev)
2521 {
2522 	struct lio *lio = GET_LIO(dev);
2523 	struct octeon_device *oct = lio->oct_dev;
2524 
2525 	switch (oct->chip_id) {
2526 	case OCTEON_CN23XX_PF_VID:
2527 		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2528 	case OCTEON_CN23XX_VF_VID:
2529 		return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2530 	default:
2531 		return OCT_ETHTOOL_REGDUMP_LEN;
2532 	}
2533 }
2534 
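/* Formatter behind "ethtool -d" on CN23xx PFs: a fixed list of SLI
 * window/interrupt CSRs followed by one line per ring for each of the
 * per-queue register families.  The sprintf() output must stay within
 * OCT_ETHTOOL_REGDUMP_LEN_23XX, which lio_get_regs() uses to size and
 * zero regbuf.
 */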
2535 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2536 {
2537 	u32 reg;
2538 	u8 pf_num = oct->pf_num;
2539 	int len = 0;
2540 	int i;
2541 
	/* PCI Window Registers */
2543 
2544 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2545 
2546 	/*0x29030 or 0x29040*/
2547 	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2548 	len += sprintf(s + len,
2549 		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2550 		       reg, oct->pcie_port, oct->pf_num,
2551 		       (u64)octeon_read_csr64(oct, reg));
2552 
2553 	/*0x27080 or 0x27090*/
2554 	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2555 	len +=
2556 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2557 		    reg, oct->pcie_port, oct->pf_num,
2558 		    (u64)octeon_read_csr64(oct, reg));
2559 
2560 	/*0x27000 or 0x27010*/
2561 	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2562 	len +=
2563 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2564 		    reg, oct->pcie_port, oct->pf_num,
2565 		    (u64)octeon_read_csr64(oct, reg));
2566 
2567 	/*0x29120*/
2568 	reg = 0x29120;
2569 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2570 		       (u64)octeon_read_csr64(oct, reg));
2571 
2572 	/*0x27300*/
2573 	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2574 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2575 	len += sprintf(
2576 	    s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2577 	    oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2578 
2579 	/*0x27200*/
2580 	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2581 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2582 	len += sprintf(s + len,
2583 		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2584 		       reg, oct->pcie_port, oct->pf_num,
2585 		       (u64)octeon_read_csr64(oct, reg));
2586 
	/*0x29130*/
2588 	reg = CN23XX_SLI_PKT_CNT_INT;
2589 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2590 		       (u64)octeon_read_csr64(oct, reg));
2591 
2592 	/*0x29140*/
2593 	reg = CN23XX_SLI_PKT_TIME_INT;
2594 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2595 		       (u64)octeon_read_csr64(oct, reg));
2596 
2597 	/*0x29160*/
2598 	reg = 0x29160;
2599 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2600 		       (u64)octeon_read_csr64(oct, reg));
2601 
2602 	/*0x29180*/
2603 	reg = CN23XX_SLI_OQ_WMARK;
2604 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2605 		       reg, (u64)octeon_read_csr64(oct, reg));
2606 
2607 	/*0x291E0*/
2608 	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2609 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2610 		       (u64)octeon_read_csr64(oct, reg));
2611 
2612 	/*0x29210*/
2613 	reg = CN23XX_SLI_GBL_CONTROL;
2614 	len += sprintf(s + len,
2615 		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2616 		       (u64)octeon_read_csr64(oct, reg));
2617 
2618 	/*0x29220*/
2619 	reg = 0x29220;
2620 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2621 		       reg, (u64)octeon_read_csr64(oct, reg));
2622 
2623 	/*PF only*/
2624 	if (pf_num == 0) {
2625 		/*0x29260*/
2626 		reg = CN23XX_SLI_OUT_BP_EN_W1S;
2627 		len += sprintf(s + len,
2628 			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
2629 			       reg, (u64)octeon_read_csr64(oct, reg));
2630 	} else if (pf_num == 1) {
2631 		/*0x29270*/
2632 		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2633 		len += sprintf(s + len,
2634 			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2635 			       reg, (u64)octeon_read_csr64(oct, reg));
2636 	}
2637 
2638 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2639 		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2640 		len +=
2641 		    sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2642 			    reg, i, (u64)octeon_read_csr64(oct, reg));
2643 	}
2644 
2645 	/*0x10040*/
2646 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2647 		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2648 		len += sprintf(s + len,
2649 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2650 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2651 	}
2652 
2653 	/*0x10080*/
2654 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2655 		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2656 		len += sprintf(s + len,
2657 			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2658 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2659 	}
2660 
2661 	/*0x10090*/
2662 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2663 		reg = CN23XX_SLI_OQ_SIZE(i);
2664 		len += sprintf(
2665 		    s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2666 		    reg, i, (u64)octeon_read_csr64(oct, reg));
2667 	}
2668 
2669 	/*0x10050*/
2670 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2671 		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2672 		len += sprintf(
2673 			s + len,
2674 			"\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2675 			reg, i, (u64)octeon_read_csr64(oct, reg));
2676 	}
2677 
2678 	/*0x10070*/
2679 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2680 		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2681 		len += sprintf(s + len,
2682 			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2683 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2684 	}
2685 
2686 	/*0x100a0*/
2687 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2688 		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2689 		len += sprintf(s + len,
2690 			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2691 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2692 	}
2693 
2694 	/*0x100b0*/
2695 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2696 		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2697 		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2698 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2699 	}
2700 
	/*0x100c0*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10000*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10010*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10020*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10030*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}
2751 
2752 	return len;
2753 }
2754 
2755 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2756 {
2757 	int len = 0;
2758 	u32 reg;
2759 	int i;
2760 
	/* PCI Window Registers */
2762 
2763 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2764 
2765 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2766 		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2767 		len += sprintf(s + len,
2768 			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2769 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2770 	}
2771 
2772 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2773 		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2774 		len += sprintf(s + len,
2775 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2776 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2777 	}
2778 
2779 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2780 		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2781 		len += sprintf(s + len,
2782 			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2783 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2784 	}
2785 
2786 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2787 		reg = CN23XX_VF_SLI_OQ_SIZE(i);
2788 		len += sprintf(s + len,
2789 			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2790 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2791 	}
2792 
2793 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2794 		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2795 		len += sprintf(s + len,
2796 			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2797 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2798 	}
2799 
2800 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2801 		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2802 		len += sprintf(s + len,
2803 			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2804 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2805 	}
2806 
2807 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2808 		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2809 		len += sprintf(s + len,
2810 			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2811 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2812 	}
2813 
2814 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2815 		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2816 		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2817 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2818 	}
2819 
2820 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2821 		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2822 		len += sprintf(s + len,
2823 			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2824 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2825 	}
2826 
2827 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2828 		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2829 		len += sprintf(s + len,
2830 			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2831 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2832 	}
2833 
2834 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2835 		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2836 		len += sprintf(s + len,
2837 			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2838 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2839 	}
2840 
2841 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2842 		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2843 		len += sprintf(s + len,
2844 			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2845 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2846 	}
2847 
2848 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2849 		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2850 		len += sprintf(s + len,
2851 			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2852 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2853 	}
2854 
2855 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2856 		reg = CN23XX_VF_SLI_IQ_SIZE(i);
2857 		len += sprintf(s + len,
2858 			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2859 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2860 	}
2861 
2862 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2863 		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2864 		len += sprintf(s + len,
2865 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2866 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2867 	}
2868 
2869 	return len;
2870 }
2871 
2872 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2873 {
2874 	u32 reg;
2875 	int i, len = 0;
2876 
	/* PCI Window Registers */
2878 
2879 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2880 	reg = CN6XXX_WIN_WR_ADDR_LO;
2881 	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2882 		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2883 	reg = CN6XXX_WIN_WR_ADDR_HI;
2884 	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2885 		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2886 	reg = CN6XXX_WIN_RD_ADDR_LO;
2887 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2888 		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2889 	reg = CN6XXX_WIN_RD_ADDR_HI;
2890 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2891 		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2892 	reg = CN6XXX_WIN_WR_DATA_LO;
2893 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2894 		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2895 	reg = CN6XXX_WIN_WR_DATA_HI;
2896 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2897 		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2898 	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2899 		       CN6XXX_WIN_WR_MASK_REG,
2900 		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2901 
	/* PCI Interrupt Registers */
2903 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2904 		       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2905 						CN6XXX_SLI_INT_ENB64_PORT0));
2906 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2907 		       CN6XXX_SLI_INT_ENB64_PORT1,
2908 		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2909 	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2910 		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2911 
	/* PCI Output queue registers */
2913 	for (i = 0; i < oct->num_oqs; i++) {
2914 		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2915 		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2916 			       reg, i, octeon_read_csr(oct, reg));
2917 		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2918 		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2919 			       reg, i, octeon_read_csr(oct, reg));
2920 	}
2921 	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2922 	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2923 		       reg, octeon_read_csr(oct, reg));
2924 	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2925 	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2926 		       reg, octeon_read_csr(oct, reg));
2927 
	/* PCI Input queue registers */
2929 	for (i = 0; i <= 3; i++) {
2930 		u32 reg;
2931 
2932 		reg = CN6XXX_SLI_IQ_DOORBELL(i);
2933 		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2934 			       reg, i, octeon_read_csr(oct, reg));
2935 		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2936 		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2937 			       reg, i, octeon_read_csr(oct, reg));
2938 	}
2939 
	/* PCI DMA registers */
2941 
2942 	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2943 		       CN6XXX_DMA_CNT(0),
2944 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2945 	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2946 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2947 		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2948 	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2949 	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2950 		       CN6XXX_DMA_TIME_INT_LEVEL(0),
2951 		       octeon_read_csr(oct, reg));
2952 
2953 	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2954 		       CN6XXX_DMA_CNT(1),
2955 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2956 	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2957 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2958 		       CN6XXX_DMA_PKT_INT_LEVEL(1),
2959 		       octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
2961 	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2962 		       CN6XXX_DMA_TIME_INT_LEVEL(1),
2963 		       octeon_read_csr(oct, reg));
2964 
	/* PCI Index registers */
2966 
2967 	len += sprintf(s + len, "\n");
2968 
2969 	for (i = 0; i < 16; i++) {
2970 		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2971 		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2972 			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2973 	}
2974 
2975 	return len;
2976 }
2977 
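/* Config-space portion of the CN6xxx dump: dwords 0-13 and 30-34 of
 * PCI configuration space, read via pci_read_config_dword().
 */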
2978 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2979 {
2980 	u32 val;
2981 	int i, len = 0;
2982 
2983 	/* PCI CONFIG Registers */
2984 
2985 	len += sprintf(s + len,
2986 		       "\n\t Octeon Config space Registers\n\n");
2987 
2988 	for (i = 0; i <= 13; i++) {
2989 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2990 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2991 			       (i * 4), i, val);
2992 	}
2993 
2994 	for (i = 30; i <= 34; i++) {
2995 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2996 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2997 			       (i * 4), i, val);
2998 	}
2999 
3000 	return len;
3001 }
3002 
/* Return register dump to user app. */
3004 static void lio_get_regs(struct net_device *dev,
3005 			 struct ethtool_regs *regs, void *regbuf)
3006 {
3007 	struct lio *lio = GET_LIO(dev);
3008 	int len = 0;
3009 	struct octeon_device *oct = lio->oct_dev;
3010 
3011 	regs->version = OCT_ETHTOOL_REGSVER;
3012 
3013 	switch (oct->chip_id) {
3014 	case OCTEON_CN23XX_PF_VID:
3015 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
3016 		len += cn23xx_read_csr_reg(regbuf + len, oct);
3017 		break;
3018 	case OCTEON_CN23XX_VF_VID:
3019 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
3020 		len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
3021 		break;
3022 	case OCTEON_CN68XX:
3023 	case OCTEON_CN66XX:
3024 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
3025 		len += cn6xxx_read_csr_reg(regbuf + len, oct);
3026 		len += cn6xxx_read_config_reg(regbuf + len, oct);
3027 		break;
3028 	default:
3029 		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
3030 			__func__, oct->chip_id);
3031 	}
3032 }
3033 
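/* Private-flag accessors.  The flags live as a bitmask in
 * oct_dev->priv_flags; the only writable bit here is
 * OCT_PRIV_FLAG_TX_BYTES, toggled via something like
 *
 *   ethtool --set-priv-flags <iface> <flag-name> on
 *
 * where <flag-name> is whatever oct_priv_flags_strings[] exposes for
 * that bit.
 */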
3034 static u32 lio_get_priv_flags(struct net_device *netdev)
3035 {
3036 	struct lio *lio = GET_LIO(netdev);
3037 
3038 	return lio->oct_dev->priv_flags;
3039 }
3040 
3041 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
3042 {
3043 	struct lio *lio = GET_LIO(netdev);
3044 	bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
3045 
3046 	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
3047 			  intr_by_tx_bytes);
3048 	return 0;
3049 }
3050 
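/* FEC control is only meaningful on the 25G CN23xx subsystems
 * (CN2350/CN2360); everything else reports ETHTOOL_FEC_NONE or
 * -EOPNOTSUPP.  The supported encodings are RS-FEC or off, matching
 * "ethtool --show-fec <iface>" / "ethtool --set-fec <iface>
 * encoding rs|off".
 */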
3051 static int lio_get_fecparam(struct net_device *netdev,
3052 			    struct ethtool_fecparam *fec)
3053 {
3054 	struct lio *lio = GET_LIO(netdev);
3055 	struct octeon_device *oct = lio->oct_dev;
3056 
3057 	fec->active_fec = ETHTOOL_FEC_NONE;
3058 	fec->fec = ETHTOOL_FEC_NONE;
3059 
3060 	if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3061 	    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3062 		if (oct->no_speed_setting == 1)
3063 			return 0;
3064 
3065 		liquidio_get_fec(lio);
3066 		fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
3067 		if (oct->props[lio->ifidx].fec == 1)
3068 			fec->active_fec = ETHTOOL_FEC_RS;
3069 		else
3070 			fec->active_fec = ETHTOOL_FEC_OFF;
3071 	}
3072 
3073 	return 0;
3074 }
3075 
3076 static int lio_set_fecparam(struct net_device *netdev,
3077 			    struct ethtool_fecparam *fec)
3078 {
3079 	struct lio *lio = GET_LIO(netdev);
3080 	struct octeon_device *oct = lio->oct_dev;
3081 
3082 	if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3083 	    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3084 		if (oct->no_speed_setting == 1)
3085 			return -EOPNOTSUPP;
3086 
3087 		if (fec->fec & ETHTOOL_FEC_OFF)
3088 			liquidio_set_fec(lio, 0);
3089 		else if (fec->fec & ETHTOOL_FEC_RS)
3090 			liquidio_set_fec(lio, 1);
3091 		else
3092 			return -EOPNOTSUPP;
3093 	} else {
3094 		return -EOPNOTSUPP;
3095 	}
3096 
3097 	return 0;
3098 }
3099 
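/* Coalesce fields this driver actually honours; the ethtool core
 * rejects any request touching a field outside this mask before
 * .set_coalesce is ever called.
 */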
3100 #define LIO_ETHTOOL_COALESCE	(ETHTOOL_COALESCE_RX_USECS |		\
3101 				 ETHTOOL_COALESCE_MAX_FRAMES |		\
3102 				 ETHTOOL_COALESCE_USE_ADAPTIVE |	\
3103 				 ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW |	\
3104 				 ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW |	\
3105 				 ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH |	\
3106 				 ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH |	\
3107 				 ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
3108 
3109 static const struct ethtool_ops lio_ethtool_ops = {
3110 	.supported_coalesce_params = LIO_ETHTOOL_COALESCE,
3111 	.get_link_ksettings	= lio_get_link_ksettings,
3112 	.set_link_ksettings	= lio_set_link_ksettings,
3113 	.get_fecparam		= lio_get_fecparam,
3114 	.set_fecparam		= lio_set_fecparam,
3115 	.get_link		= ethtool_op_get_link,
3116 	.get_drvinfo		= lio_get_drvinfo,
3117 	.get_ringparam		= lio_ethtool_get_ringparam,
3118 	.set_ringparam		= lio_ethtool_set_ringparam,
3119 	.get_channels		= lio_ethtool_get_channels,
3120 	.set_channels		= lio_ethtool_set_channels,
3121 	.set_phys_id		= lio_set_phys_id,
3122 	.get_eeprom_len		= lio_get_eeprom_len,
3123 	.get_eeprom		= lio_get_eeprom,
3124 	.get_strings		= lio_get_strings,
3125 	.get_ethtool_stats	= lio_get_ethtool_stats,
3126 	.get_pauseparam		= lio_get_pauseparam,
3127 	.set_pauseparam		= lio_set_pauseparam,
3128 	.get_regs_len		= lio_get_regs_len,
3129 	.get_regs		= lio_get_regs,
3130 	.get_msglevel		= lio_get_msglevel,
3131 	.set_msglevel		= lio_set_msglevel,
3132 	.get_sset_count		= lio_get_sset_count,
3133 	.get_coalesce		= lio_get_intr_coalesce,
3134 	.set_coalesce		= lio_set_intr_coalesce,
3135 	.get_priv_flags		= lio_get_priv_flags,
3136 	.set_priv_flags		= lio_set_priv_flags,
3137 	.get_ts_info		= lio_get_ts_info,
3138 };
3139 
3140 static const struct ethtool_ops lio_vf_ethtool_ops = {
3141 	.supported_coalesce_params = LIO_ETHTOOL_COALESCE,
3142 	.get_link_ksettings	= lio_get_link_ksettings,
3143 	.get_link		= ethtool_op_get_link,
3144 	.get_drvinfo		= lio_get_vf_drvinfo,
3145 	.get_ringparam		= lio_ethtool_get_ringparam,
3146 	.set_ringparam          = lio_ethtool_set_ringparam,
3147 	.get_channels		= lio_ethtool_get_channels,
3148 	.set_channels		= lio_ethtool_set_channels,
3149 	.get_strings		= lio_vf_get_strings,
3150 	.get_ethtool_stats	= lio_vf_get_ethtool_stats,
3151 	.get_regs_len		= lio_get_regs_len,
3152 	.get_regs		= lio_get_regs,
3153 	.get_msglevel		= lio_get_msglevel,
3154 	.set_msglevel		= lio_vf_set_msglevel,
3155 	.get_sset_count		= lio_vf_get_sset_count,
3156 	.get_coalesce		= lio_get_intr_coalesce,
3157 	.set_coalesce		= lio_set_intr_coalesce,
3158 	.get_priv_flags		= lio_get_priv_flags,
3159 	.set_priv_flags		= lio_set_priv_flags,
3160 	.get_ts_info		= lio_get_ts_info,
3161 };
3162 
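/* Attach the right ops table at netdev setup time.  The VF table is
 * the PF one minus anything that needs PF-level privilege: link and
 * FEC configuration, pause parameters, EEPROM access and the identify
 * LED.
 */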
3163 void liquidio_set_ethtool_ops(struct net_device *netdev)
3164 {
3165 	struct lio *lio = GET_LIO(netdev);
3166 	struct octeon_device *oct = lio->oct_dev;
3167 
3168 	if (OCTEON_CN23XX_VF(oct))
3169 		netdev->ethtool_ops = &lio_vf_ethtool_ops;
3170 	else
3171 		netdev->ethtool_ops = &lio_ethtool_ops;
3172 }
3173