xref: /linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c (revision f6f3bac08ff9855d803081a353a1fafaa8845739)
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/netdevice.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/pci.h>
21 #include "liquidio_common.h"
22 #include "octeon_droq.h"
23 #include "octeon_iq.h"
24 #include "response_manager.h"
25 #include "octeon_device.h"
26 #include "octeon_nic.h"
27 #include "octeon_main.h"
28 #include "octeon_network.h"
29 #include "cn66xx_regs.h"
30 #include "cn66xx_device.h"
31 #include "cn23xx_pf_device.h"
32 #include "cn23xx_vf_device.h"
33 
34 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
35 
36 struct oct_intrmod_resp {
37 	u64     rh;
38 	struct oct_intrmod_cfg intrmod;
39 	u64     status;
40 };
41 
42 struct oct_mdio_cmd_resp {
43 	u64 rh;
44 	struct oct_mdio_cmd resp;
45 	u64 status;
46 };
47 
48 #define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
49 
50 /* Octeon's interface mode of operation */
51 enum {
52 	INTERFACE_MODE_DISABLED,
53 	INTERFACE_MODE_RGMII,
54 	INTERFACE_MODE_GMII,
55 	INTERFACE_MODE_SPI,
56 	INTERFACE_MODE_PCIE,
57 	INTERFACE_MODE_XAUI,
58 	INTERFACE_MODE_SGMII,
59 	INTERFACE_MODE_PICMG,
60 	INTERFACE_MODE_NPI,
61 	INTERFACE_MODE_LOOP,
62 	INTERFACE_MODE_SRIO,
63 	INTERFACE_MODE_ILK,
64 	INTERFACE_MODE_RXAUI,
65 	INTERFACE_MODE_QSGMII,
66 	INTERFACE_MODE_AGL,
67 	INTERFACE_MODE_XLAUI,
68 	INTERFACE_MODE_XFI,
69 	INTERFACE_MODE_10G_KR,
70 	INTERFACE_MODE_40G_KR4,
71 	INTERFACE_MODE_MIXED,
72 };
73 
74 #define OCT_ETHTOOL_REGDUMP_LEN  4096
75 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
76 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
77 #define OCT_ETHTOOL_REGSVER  1
78 
79 /* statistics of PF */
80 static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
81 	"rx_packets",
82 	"tx_packets",
83 	"rx_bytes",
84 	"tx_bytes",
85 	"rx_errors",
86 	"tx_errors",
87 	"rx_dropped",
88 	"tx_dropped",
89 
90 	"tx_total_sent",
91 	"tx_total_fwd",
92 	"tx_err_pko",
93 	"tx_err_pki",
94 	"tx_err_link",
95 	"tx_err_drop",
96 
97 	"tx_tso",
98 	"tx_tso_packets",
99 	"tx_tso_err",
100 	"tx_vxlan",
101 
102 	"tx_mcast",
103 	"tx_bcast",
104 
105 	"mac_tx_total_pkts",
106 	"mac_tx_total_bytes",
107 	"mac_tx_mcast_pkts",
108 	"mac_tx_bcast_pkts",
109 	"mac_tx_ctl_packets",
110 	"mac_tx_total_collisions",
111 	"mac_tx_one_collision",
112 	"mac_tx_multi_collision",
113 	"mac_tx_max_collision_fail",
114 	"mac_tx_max_deferal_fail",
115 	"mac_tx_fifo_err",
116 	"mac_tx_runts",
117 
118 	"rx_total_rcvd",
119 	"rx_total_fwd",
120 	"rx_mcast",
121 	"rx_bcast",
122 	"rx_jabber_err",
123 	"rx_l2_err",
124 	"rx_frame_err",
125 	"rx_err_pko",
126 	"rx_err_link",
127 	"rx_err_drop",
128 
129 	"rx_vxlan",
130 	"rx_vxlan_err",
131 
132 	"rx_lro_pkts",
133 	"rx_lro_bytes",
134 	"rx_total_lro",
135 
136 	"rx_lro_aborts",
137 	"rx_lro_aborts_port",
138 	"rx_lro_aborts_seq",
139 	"rx_lro_aborts_tsval",
140 	"rx_lro_aborts_timer",
141 	"rx_fwd_rate",
142 
143 	"mac_rx_total_rcvd",
144 	"mac_rx_bytes",
145 	"mac_rx_total_bcst",
146 	"mac_rx_total_mcst",
147 	"mac_rx_runts",
148 	"mac_rx_ctl_packets",
149 	"mac_rx_fifo_err",
150 	"mac_rx_dma_drop",
151 	"mac_rx_fcs_err",
152 
153 	"link_state_changes",
154 };
155 
156 /* statistics of VF */
157 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
158 	"rx_packets",
159 	"tx_packets",
160 	"rx_bytes",
161 	"tx_bytes",
162 	"rx_errors",
163 	"tx_errors",
164 	"rx_dropped",
165 	"tx_dropped",
166 	"rx_mcast",
167 	"tx_mcast",
168 	"rx_bcast",
169 	"tx_bcast",
170 	"link_state_changes",
171 };
172 
173 /* statistics of host tx queue */
174 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
175 	"packets",
176 	"bytes",
177 	"dropped",
178 	"iq_busy",
179 	"sgentry_sent",
180 
181 	"fw_instr_posted",
182 	"fw_instr_processed",
183 	"fw_instr_dropped",
184 	"fw_bytes_sent",
185 
186 	"tso",
187 	"vxlan",
188 	"txq_restart",
189 };
190 
191 /* statistics of host rx queue */
192 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
193 	"packets",
194 	"bytes",
195 	"dropped",
196 	"dropped_nomem",
197 	"dropped_toomany",
198 	"fw_dropped",
199 	"fw_pkts_received",
200 	"fw_bytes_received",
201 	"fw_dropped_nodispatch",
202 
203 	"vxlan",
204 	"buffer_alloc_failure",
205 };
206 
207 /* LiquidIO driver private flags */
208 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
209 };
210 
211 #define OCTNIC_NCMD_AUTONEG_ON  0x1
212 #define OCTNIC_NCMD_PHY_ON      0x2
213 
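/* ethtool get_link_ksettings handler: report the port type, the
 * supported and advertised link modes, and the current speed/duplex
 * from the link info last reported by firmware.
 */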
214 static int lio_get_link_ksettings(struct net_device *netdev,
215 				  struct ethtool_link_ksettings *ecmd)
216 {
217 	struct lio *lio = GET_LIO(netdev);
218 	struct octeon_device *oct = lio->oct_dev;
219 	struct oct_link_info *linfo;
220 
221 	linfo = &lio->linfo;
222 
223 	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
224 	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
225 
226 	switch (linfo->link.s.phy_type) {
227 	case LIO_PHY_PORT_TP:
228 		ecmd->base.port = PORT_TP;
229 		ecmd->base.autoneg = AUTONEG_DISABLE;
230 		ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
231 		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
232 		ethtool_link_ksettings_add_link_mode(ecmd, supported,
233 						     10000baseT_Full);
234 
235 		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
236 		ethtool_link_ksettings_add_link_mode(ecmd, advertising,
237 						     10000baseT_Full);
238 
239 		break;
240 
241 	case LIO_PHY_PORT_FIBRE:
242 		if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
243 		    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
244 		    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
245 		    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
246 			dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
247 		} else {
248 			dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
249 				linfo->link.s.if_mode);
250 		}
251 
252 		ecmd->base.port = PORT_FIBRE;
253 		ecmd->base.autoneg = AUTONEG_DISABLE;
254 		ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
255 
256 		ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
257 		ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
258 		if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
259 		    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
260 			if (OCTEON_CN23XX_PF(oct)) {
261 				ethtool_link_ksettings_add_link_mode
262 					(ecmd, supported, 25000baseSR_Full);
263 				ethtool_link_ksettings_add_link_mode
264 					(ecmd, supported, 25000baseKR_Full);
265 				ethtool_link_ksettings_add_link_mode
266 					(ecmd, supported, 25000baseCR_Full);
267 
268 				if (oct->no_speed_setting == 0)  {
269 					ethtool_link_ksettings_add_link_mode
270 						(ecmd, supported,
271 						 10000baseSR_Full);
272 					ethtool_link_ksettings_add_link_mode
273 						(ecmd, supported,
274 						 10000baseKR_Full);
275 					ethtool_link_ksettings_add_link_mode
276 						(ecmd, supported,
277 						 10000baseCR_Full);
278 				}
279 
280 				if (oct->no_speed_setting == 0)
281 					liquidio_get_speed(lio);
282 				else
283 					oct->speed_setting = 25;
284 
285 				if (oct->speed_setting == 10) {
286 					ethtool_link_ksettings_add_link_mode
287 						(ecmd, advertising,
288 						 10000baseSR_Full);
289 					ethtool_link_ksettings_add_link_mode
290 						(ecmd, advertising,
291 						 10000baseKR_Full);
292 					ethtool_link_ksettings_add_link_mode
293 						(ecmd, advertising,
294 						 10000baseCR_Full);
295 				}
296 				if (oct->speed_setting == 25) {
297 					ethtool_link_ksettings_add_link_mode
298 						(ecmd, advertising,
299 						 25000baseSR_Full);
300 					ethtool_link_ksettings_add_link_mode
301 						(ecmd, advertising,
302 						 25000baseKR_Full);
303 					ethtool_link_ksettings_add_link_mode
304 						(ecmd, advertising,
305 						 25000baseCR_Full);
306 				}
307 			} else { /* VF */
308 				if (linfo->link.s.speed == 10000) {
309 					ethtool_link_ksettings_add_link_mode
310 						(ecmd, supported,
311 						 10000baseSR_Full);
312 					ethtool_link_ksettings_add_link_mode
313 						(ecmd, supported,
314 						 10000baseKR_Full);
315 					ethtool_link_ksettings_add_link_mode
316 						(ecmd, supported,
317 						 10000baseCR_Full);
318 
319 					ethtool_link_ksettings_add_link_mode
320 						(ecmd, advertising,
321 						 10000baseSR_Full);
322 					ethtool_link_ksettings_add_link_mode
323 						(ecmd, advertising,
324 						 10000baseKR_Full);
325 					ethtool_link_ksettings_add_link_mode
326 						(ecmd, advertising,
327 						 10000baseCR_Full);
328 				}
329 
330 				if (linfo->link.s.speed == 25000) {
331 					ethtool_link_ksettings_add_link_mode
332 						(ecmd, supported,
333 						 25000baseSR_Full);
334 					ethtool_link_ksettings_add_link_mode
335 						(ecmd, supported,
336 						 25000baseKR_Full);
337 					ethtool_link_ksettings_add_link_mode
338 						(ecmd, supported,
339 						 25000baseCR_Full);
340 
341 					ethtool_link_ksettings_add_link_mode
342 						(ecmd, advertising,
343 						 25000baseSR_Full);
344 					ethtool_link_ksettings_add_link_mode
345 						(ecmd, advertising,
346 						 25000baseKR_Full);
347 					ethtool_link_ksettings_add_link_mode
348 						(ecmd, advertising,
349 						 25000baseCR_Full);
350 				}
351 			}
352 		} else {
353 			ethtool_link_ksettings_add_link_mode(ecmd, supported,
354 							     10000baseT_Full);
355 			ethtool_link_ksettings_add_link_mode(ecmd, advertising,
356 							     10000baseT_Full);
357 		}
358 		break;
359 	}
360 
361 	if (linfo->link.s.link_up) {
362 		ecmd->base.speed = linfo->link.s.speed;
363 		ecmd->base.duplex = linfo->link.s.duplex;
364 	} else {
365 		ecmd->base.speed = SPEED_UNKNOWN;
366 		ecmd->base.duplex = DUPLEX_UNKNOWN;
367 	}
368 
369 	return 0;
370 }
371 
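/* ethtool set_link_ksettings handler: only the CN2350/CN2360 25G cards
 * can change speed (10G or 25G, fixed duplex, no autoneg); all other
 * configurations are rejected.
 */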
372 static int lio_set_link_ksettings(struct net_device *netdev,
373 				  const struct ethtool_link_ksettings *ecmd)
374 {
375 	const int speed = ecmd->base.speed;
376 	struct lio *lio = GET_LIO(netdev);
377 	struct oct_link_info *linfo;
378 	struct octeon_device *oct;
379 
380 	oct = lio->oct_dev;
381 
382 	linfo = &lio->linfo;
383 
384 	if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
385 	      oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
386 		return -EOPNOTSUPP;
387 
388 	if (oct->no_speed_setting) {
389 		dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
390 			__func__);
391 		return -EOPNOTSUPP;
392 	}
393 
394 	if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
395 	     ecmd->base.duplex != linfo->link.s.duplex) ||
396 	     ecmd->base.autoneg != AUTONEG_DISABLE ||
397 	    (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
398 	     ecmd->base.speed != SPEED_UNKNOWN))
399 		return -EOPNOTSUPP;
400 
401 	if ((oct->speed_boot == speed / 1000) &&
402 	    oct->speed_boot == oct->speed_setting)
403 		return 0;
404 
405 	liquidio_set_speed(lio, speed / 1000);
406 
407 	dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
408 		oct->speed_setting);
409 
410 	return 0;
411 }
412 
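/* Report driver name/version, firmware version, and PCI bus info for
 * "ethtool -i" on the PF.
 */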
413 static void
414 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
415 {
416 	struct lio *lio;
417 	struct octeon_device *oct;
418 
419 	lio = GET_LIO(netdev);
420 	oct = lio->oct_dev;
421 
422 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
423 	strcpy(drvinfo->driver, "liquidio");
424 	strcpy(drvinfo->version, LIQUIDIO_VERSION);
425 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
426 		ETHTOOL_FWVERS_LEN);
427 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
428 }
429 
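/* VF counterpart of lio_get_drvinfo; identical except for the
 * "liquidio_vf" driver name.
 */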
430 static void
431 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
432 {
433 	struct octeon_device *oct;
434 	struct lio *lio;
435 
436 	lio = GET_LIO(netdev);
437 	oct = lio->oct_dev;
438 
439 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
440 	strcpy(drvinfo->driver, "liquidio_vf");
441 	strcpy(drvinfo->version, LIQUIDIO_VERSION);
442 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
443 		ETHTOOL_FWVERS_LEN);
444 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
445 }
446 
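/* Send a control packet on IQ 0 telling the firmware how many queues
 * this interface now uses.
 */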
447 static int
448 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
449 {
450 	struct lio *lio = GET_LIO(netdev);
451 	struct octeon_device *oct = lio->oct_dev;
452 	struct octnic_ctrl_pkt nctrl;
453 	int ret = 0;
454 
455 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
456 
457 	nctrl.ncmd.u64 = 0;
458 	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
459 	nctrl.ncmd.s.param1 = num_queues;
460 	nctrl.ncmd.s.param2 = num_queues;
461 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
462 	nctrl.netpndev = (u64)netdev;
463 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
464 
465 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
466 	if (ret) {
467 		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
468 			ret);
469 		return -1;
470 	}
471 
472 	return 0;
473 }
474 
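/* ethtool get_channels handler: CN6XXX reports separate rx/tx queue
 * limits, while CN23XX PF/VF report combined queue counts.
 */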
475 static void
476 lio_ethtool_get_channels(struct net_device *dev,
477 			 struct ethtool_channels *channel)
478 {
479 	struct lio *lio = GET_LIO(dev);
480 	struct octeon_device *oct = lio->oct_dev;
481 	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
482 	u32 combined_count = 0, max_combined = 0;
483 
484 	if (OCTEON_CN6XXX(oct)) {
485 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
486 
487 		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
488 		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
489 		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
490 		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
491 	} else if (OCTEON_CN23XX_PF(oct)) {
492 		if (oct->sriov_info.sriov_enabled) {
493 			max_combined = lio->linfo.num_txpciq;
494 		} else {
495 			struct octeon_config *conf23_pf =
496 				CHIP_CONF(oct, cn23xx_pf);
497 
498 			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
499 		}
500 		combined_count = oct->num_iqs;
501 	} else if (OCTEON_CN23XX_VF(oct)) {
502 		u64 reg_val = 0ULL;
503 		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
504 
505 		reg_val = octeon_read_csr64(oct, ctrl);
506 		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
507 		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
508 		combined_count = oct->num_iqs;
509 	}
510 
511 	channel->max_rx = max_rx;
512 	channel->max_tx = max_tx;
513 	channel->max_combined = max_combined;
514 	channel->rx_count = rx_count;
515 	channel->tx_count = tx_count;
516 	channel->combined_count = combined_count;
517 }
518 
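/* Free all MSI-X vectors currently in use, then re-allocate ioq
 * vectors and interrupts sized for num_ioqs queues.
 */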
519 static int
520 lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
521 {
522 	struct msix_entry *msix_entries;
523 	int num_msix_irqs = 0;
524 	int i;
525 
526 	if (!oct->msix_on)
527 		return 0;
528 
529 	/* Disable Octeon interrupts now; no more interrupts will
530 	 * arrive from Octeon while the MSI-X vectors are reallocated.
531 	 */
532 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
533 
534 	if (oct->msix_on) {
535 		if (OCTEON_CN23XX_PF(oct))
536 			num_msix_irqs = oct->num_msix_irqs - 1;
537 		else if (OCTEON_CN23XX_VF(oct))
538 			num_msix_irqs = oct->num_msix_irqs;
539 
540 		msix_entries = (struct msix_entry *)oct->msix_entries;
541 		for (i = 0; i < num_msix_irqs; i++) {
542 			if (oct->ioq_vector[i].vector) {
543 				/* clear the affinity_cpumask */
544 				irq_set_affinity_hint(msix_entries[i].vector,
545 						      NULL);
546 				free_irq(msix_entries[i].vector,
547 					 &oct->ioq_vector[i]);
548 				oct->ioq_vector[i].vector = 0;
549 			}
550 		}
551 
552 		/* the non-ioq vector's argument is the oct struct */
553 		if (OCTEON_CN23XX_PF(oct))
554 			free_irq(msix_entries[i].vector, oct);
555 
556 		pci_disable_msix(oct->pci_dev);
557 		kfree(oct->msix_entries);
558 		oct->msix_entries = NULL;
559 	}
560 
561 	kfree(oct->irq_name_storage);
562 	oct->irq_name_storage = NULL;
563 
564 	if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
565 		dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
566 		return -1;
567 	}
568 
569 	if (octeon_setup_interrupt(oct, num_ioqs)) {
570 		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
571 		return -1;
572 	}
573 
574 	/* Enable Octeon device interrupts */
575 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
576 
577 	return 0;
578 }
579 
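/* ethtool set_channels handler, e.g. "ethtool -L ethX combined 8":
 * only combined channels may be changed; the interface is stopped,
 * the queues are reset to the new count, and it is restarted.
 */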
580 static int
581 lio_ethtool_set_channels(struct net_device *dev,
582 			 struct ethtool_channels *channel)
583 {
584 	u32 combined_count, max_combined;
585 	struct lio *lio = GET_LIO(dev);
586 	struct octeon_device *oct = lio->oct_dev;
587 	int stopped = 0;
588 
589 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
590 		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
591 		return -EINVAL;
592 	}
593 
594 	if (!channel->combined_count || channel->other_count ||
595 	    channel->rx_count || channel->tx_count)
596 		return -EINVAL;
597 
598 	combined_count = channel->combined_count;
599 
600 	if (OCTEON_CN23XX_PF(oct)) {
601 		if (oct->sriov_info.sriov_enabled) {
602 			max_combined = lio->linfo.num_txpciq;
603 		} else {
604 			struct octeon_config *conf23_pf =
605 				CHIP_CONF(oct,
606 					  cn23xx_pf);
607 
608 			max_combined =
609 				CFG_GET_IQ_MAX_Q(conf23_pf);
610 		}
611 	} else if (OCTEON_CN23XX_VF(oct)) {
612 		u64 reg_val = 0ULL;
613 		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
614 
615 		reg_val = octeon_read_csr64(oct, ctrl);
616 		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
617 		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
618 	} else {
619 		return -EINVAL;
620 	}
621 
622 	if (combined_count > max_combined || combined_count < 1)
623 		return -EINVAL;
624 
625 	if (combined_count == oct->num_iqs)
626 		return 0;
627 
628 	ifstate_set(lio, LIO_IFSTATE_RESETTING);
629 
630 	if (netif_running(dev)) {
631 		dev->netdev_ops->ndo_stop(dev);
632 		stopped = 1;
633 	}
634 
635 	if (lio_reset_queues(dev, combined_count))
636 		return -EINVAL;
637 
638 	if (stopped)
639 		dev->netdev_ops->ndo_open(dev);
640 
641 	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
642 
643 	return 0;
644 }
645 
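/* The "EEPROM" exposed through ethtool is just a formatted string of
 * the board name, serial number, and major/minor revision; the same
 * string is returned by lio_get_eeprom() below.
 */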
646 static int lio_get_eeprom_len(struct net_device *netdev)
647 {
648 	u8 buf[192];
649 	struct lio *lio = GET_LIO(netdev);
650 	struct octeon_device *oct_dev = lio->oct_dev;
651 	struct octeon_board_info *board_info;
652 	int len;
653 
654 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
655 	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
656 		      board_info->name, board_info->serial_number,
657 		      board_info->major, board_info->minor);
658 
659 	return len;
660 }
661 
662 static int
663 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
664 	       u8 *bytes)
665 {
666 	struct lio *lio = GET_LIO(netdev);
667 	struct octeon_device *oct_dev = lio->oct_dev;
668 	struct octeon_board_info *board_info;
669 
670 	if (eeprom->offset)
671 		return -EINVAL;
672 
673 	eeprom->magic = oct_dev->pci_dev->vendor;
674 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
675 	sprintf((char *)bytes,
676 		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
677 		board_info->name, board_info->serial_number,
678 		board_info->major, board_info->minor);
679 
680 	return 0;
681 }
682 
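/* Write a value to a PHY GPIO register through a firmware control
 * command; used below to drive the LED on CN66XX boards.
 */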
683 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
684 {
685 	struct lio *lio = GET_LIO(netdev);
686 	struct octeon_device *oct = lio->oct_dev;
687 	struct octnic_ctrl_pkt nctrl;
688 	int ret = 0;
689 
690 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
691 
692 	nctrl.ncmd.u64 = 0;
693 	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
694 	nctrl.ncmd.s.param1 = addr;
695 	nctrl.ncmd.s.param2 = val;
696 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
697 	nctrl.netpndev = (u64)netdev;
698 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
699 
700 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
701 	if (ret) {
702 		dev_err(&oct->pci_dev->dev,
703 			"Failed to configure gpio value, ret=%d\n", ret);
704 		return -EINVAL;
705 	}
706 
707 	return 0;
708 }
709 
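/* Ask the firmware to turn the port identification LED on or off
 * (used for CN23XX PF devices).
 */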
710 static int octnet_id_active(struct net_device *netdev, int val)
711 {
712 	struct lio *lio = GET_LIO(netdev);
713 	struct octeon_device *oct = lio->oct_dev;
714 	struct octnic_ctrl_pkt nctrl;
715 	int ret = 0;
716 
717 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
718 
719 	nctrl.ncmd.u64 = 0;
720 	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
721 	nctrl.ncmd.s.param1 = val;
722 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
723 	nctrl.netpndev = (u64)netdev;
724 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
725 
726 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
727 	if (ret) {
728 		dev_err(&oct->pci_dev->dev,
729 			"Failed to configure LED identification, ret=%d\n", ret);
730 		return -EINVAL;
731 	}
732 
733 	return 0;
734 }
735 
736 /* This routine provides PHY register access through
737  * MDIO clause 45.
738  */
739 static int
740 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
741 {
742 	struct octeon_device *oct_dev = lio->oct_dev;
743 	struct octeon_soft_command *sc;
744 	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
745 	struct oct_mdio_cmd *mdio_cmd;
746 	int retval = 0;
747 
748 	sc = (struct octeon_soft_command *)
749 		octeon_alloc_soft_command(oct_dev,
750 					  sizeof(struct oct_mdio_cmd),
751 					  sizeof(struct oct_mdio_cmd_resp), 0);
752 
753 	if (!sc)
754 		return -ENOMEM;
755 
756 	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
757 	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
758 
759 	mdio_cmd->op = op;
760 	mdio_cmd->mdio_addr = loc;
761 	if (op)
762 		mdio_cmd->value1 = *value;
763 	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
764 
765 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
766 
767 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
768 				    0, 0, 0);
769 
770 	init_completion(&sc->complete);
771 	sc->sc_status = OCTEON_REQUEST_PENDING;
772 
773 	retval = octeon_send_soft_command(oct_dev, sc);
774 	if (retval == IQ_SEND_FAILED) {
775 		dev_err(&oct_dev->pci_dev->dev,
776 			"octnet_mdio45_access instruction failed status: %x\n",
777 			retval);
778 		octeon_free_soft_command(oct_dev, sc);
779 		return -EBUSY;
780 	} else {
781 		/* Sleep until the response arrives or the request
782 		 * times out
783 		 */
784 		retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
785 		if (retval)
786 			return retval;
787 
788 		retval = mdio_cmd_rsp->status;
789 		if (retval) {
790 			dev_err(&oct_dev->pci_dev->dev,
791 				"octnet mdio45 access failed: %x\n", retval);
792 			WRITE_ONCE(sc->caller_is_done, true);
793 			return -EBUSY;
794 		}
795 
796 		octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
797 				    sizeof(struct oct_mdio_cmd) / 8);
798 
799 		if (!op)
800 			*value = mdio_cmd_rsp->resp.value1;
801 
802 		WRITE_ONCE(sc->caller_is_done, true);
803 	}
804 
805 	return retval;
806 }
807 
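/* ethtool set_phys_id handler ("ethtool -p ethX"): blink the port LED
 * via GPIO on CN66XX, via MDIO beacon registers on CN68XX, or via a
 * firmware command on CN23XX PF, restoring the original state when
 * done.
 */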
808 static int lio_set_phys_id(struct net_device *netdev,
809 			   enum ethtool_phys_id_state state)
810 {
811 	struct lio *lio = GET_LIO(netdev);
812 	struct octeon_device *oct = lio->oct_dev;
813 	struct oct_link_info *linfo;
814 	int value, ret;
815 	u32 cur_ver;
816 
817 	linfo = &lio->linfo;
818 	cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
819 			     oct->fw_info.ver.min,
820 			     oct->fw_info.ver.rev);
821 
822 	switch (state) {
823 	case ETHTOOL_ID_ACTIVE:
824 		if (oct->chip_id == OCTEON_CN66XX) {
825 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
826 					   VITESSE_PHY_GPIO_DRIVEON);
827 			return 2;
828 
829 		} else if (oct->chip_id == OCTEON_CN68XX) {
830 			/* Save the current LED settings */
831 			ret = octnet_mdio45_access(lio, 0,
832 						   LIO68XX_LED_BEACON_ADDR,
833 						   &lio->phy_beacon_val);
834 			if (ret)
835 				return ret;
836 
837 			ret = octnet_mdio45_access(lio, 0,
838 						   LIO68XX_LED_CTRL_ADDR,
839 						   &lio->led_ctrl_val);
840 			if (ret)
841 				return ret;
842 
843 			/* Configure Beacon values */
844 			value = LIO68XX_LED_BEACON_CFGON;
845 			ret = octnet_mdio45_access(lio, 1,
846 						   LIO68XX_LED_BEACON_ADDR,
847 						   &value);
848 			if (ret)
849 				return ret;
850 
851 			value = LIO68XX_LED_CTRL_CFGON;
852 			ret = octnet_mdio45_access(lio, 1,
853 						   LIO68XX_LED_CTRL_ADDR,
854 						   &value);
855 			if (ret)
856 				return ret;
857 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
858 			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
859 			if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
860 			    cur_ver > OCT_FW_VER(1, 7, 2))
861 				return 2;
862 			else
863 				return 0;
864 		} else {
865 			return -EINVAL;
866 		}
867 		break;
868 
869 	case ETHTOOL_ID_ON:
870 		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
871 		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
872 		    cur_ver > OCT_FW_VER(1, 7, 2))
873 			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
874 		else if (oct->chip_id == OCTEON_CN66XX)
875 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
876 					   VITESSE_PHY_GPIO_HIGH);
877 		else
878 			return -EINVAL;
879 
880 		break;
881 
882 	case ETHTOOL_ID_OFF:
883 		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
884 		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
885 		    cur_ver > OCT_FW_VER(1, 7, 2))
886 			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
887 		else if (oct->chip_id == OCTEON_CN66XX)
888 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
889 					   VITESSE_PHY_GPIO_LOW);
890 		else
891 			return -EINVAL;
892 
893 		break;
894 
895 	case ETHTOOL_ID_INACTIVE:
896 		if (oct->chip_id == OCTEON_CN66XX) {
897 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
898 					   VITESSE_PHY_GPIO_DRIVEOFF);
899 		} else if (oct->chip_id == OCTEON_CN68XX) {
900 			/* Restore LED settings */
901 			ret = octnet_mdio45_access(lio, 1,
902 						   LIO68XX_LED_CTRL_ADDR,
903 						   &lio->led_ctrl_val);
904 			if (ret)
905 				return ret;
906 
907 			ret = octnet_mdio45_access(lio, 1,
908 						   LIO68XX_LED_BEACON_ADDR,
909 						   &lio->phy_beacon_val);
910 			if (ret)
911 				return ret;
912 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
913 			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
914 
915 			return 0;
916 		} else {
917 			return -EINVAL;
918 		}
919 		break;
920 
921 	default:
922 		return -EINVAL;
923 	}
924 
925 	return 0;
926 }
927 
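/* ethtool get_ringparam handler: report the current and maximum rx/tx
 * descriptor counts.
 */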
928 static void
929 lio_ethtool_get_ringparam(struct net_device *netdev,
930 			  struct ethtool_ringparam *ering)
931 {
932 	struct lio *lio = GET_LIO(netdev);
933 	struct octeon_device *oct = lio->oct_dev;
934 	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
935 	    rx_pending = 0;
936 
937 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
938 		return;
939 
940 	if (OCTEON_CN6XXX(oct)) {
941 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
942 
943 		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
944 		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
945 		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
946 		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
947 	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
948 		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
949 		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
950 		rx_pending = oct->droq[0]->max_count;
951 		tx_pending = oct->instr_queue[0]->max_count;
952 	}
953 
954 	ering->tx_pending = tx_pending;
955 	ering->tx_max_pending = tx_max_pending;
956 	ering->rx_pending = rx_pending;
957 	ering->rx_max_pending = rx_max_pending;
958 	ering->rx_mini_pending = 0;
959 	ering->rx_jumbo_pending = 0;
960 	ering->rx_mini_max_pending = 0;
961 	ering->rx_jumbo_max_pending = 0;
962 }
963 
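/* Send an OPCODE_NIC_QCOUNT_UPDATE soft command so the CN23XX firmware
 * adopts the new queue configuration, then refresh the local copy of
 * the returned queue and link information.
 */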
964 static int lio_23xx_reconfigure_queue_count(struct lio *lio)
965 {
966 	struct octeon_device *oct = lio->oct_dev;
967 	u32 resp_size, data_size;
968 	struct liquidio_if_cfg_resp *resp;
969 	struct octeon_soft_command *sc;
970 	union oct_nic_if_cfg if_cfg;
971 	struct lio_version *vdata;
972 	u32 ifidx_or_pfnum;
973 	int retval;
974 	int j;
975 
976 	resp_size = sizeof(struct liquidio_if_cfg_resp);
977 	data_size = sizeof(struct lio_version);
978 	sc = (struct octeon_soft_command *)
979 		octeon_alloc_soft_command(oct, data_size,
980 					  resp_size, 0);
981 	if (!sc) {
982 		dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
983 			__func__);
984 		return -1;
985 	}
986 
987 	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
988 	vdata = (struct lio_version *)sc->virtdptr;
989 
990 	vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
991 	vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
992 	vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
993 
994 	ifidx_or_pfnum = oct->pf_num;
995 
996 	if_cfg.u64 = 0;
997 	if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
998 	if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
999 	if_cfg.s.base_queue = oct->sriov_info.pf_srn;
1000 	if_cfg.s.gmx_port_id = oct->pf_num;
1001 
1002 	sc->iq_no = 0;
1003 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1004 				    OPCODE_NIC_QCOUNT_UPDATE, 0,
1005 				    if_cfg.u64, 0);
1006 
1007 	init_completion(&sc->complete);
1008 	sc->sc_status = OCTEON_REQUEST_PENDING;
1009 
1010 	retval = octeon_send_soft_command(oct, sc);
1011 	if (retval == IQ_SEND_FAILED) {
1012 		dev_err(&oct->pci_dev->dev,
1013 			"Sending iq/oq config failed status: %x\n",
1014 			retval);
1015 		octeon_free_soft_command(oct, sc);
1016 		return -EIO;
1017 	}
1018 
1019 	retval = wait_for_sc_completion_timeout(oct, sc, 0);
1020 	if (retval)
1021 		return retval;
1022 
1023 	retval = resp->status;
1024 	if (retval) {
1025 		dev_err(&oct->pci_dev->dev,
1026 			"iq/oq config failed: %x\n", retval);
1027 		WRITE_ONCE(sc->caller_is_done, true);
1028 		return -1;
1029 	}
1030 
1031 	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
1032 			    (sizeof(struct liquidio_if_cfg_info)) >> 3);
1033 
1034 	lio->ifidx = ifidx_or_pfnum;
1035 	lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
1036 	lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
1037 	for (j = 0; j < lio->linfo.num_rxpciq; j++) {
1038 		lio->linfo.rxpciq[j].u64 =
1039 			resp->cfg_info.linfo.rxpciq[j].u64;
1040 	}
1041 
1042 	for (j = 0; j < lio->linfo.num_txpciq; j++) {
1043 		lio->linfo.txpciq[j].u64 =
1044 			resp->cfg_info.linfo.txpciq[j].u64;
1045 	}
1046 
1047 	lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1048 	lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1049 	lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
1050 	lio->txq = lio->linfo.txpciq[0].s.q_no;
1051 	lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1052 
1053 	dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
1054 		 lio->linfo.num_rxpciq);
1055 
1056 	WRITE_ONCE(sc->caller_is_done, true);
1057 
1058 	return 0;
1059 }
1060 
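/* Tear down and recreate this device's I/O queues. Handles both a
 * queue-count change (from set_channels) and a descriptor-count change
 * (from set_ringparam); only the former reallocates IRQs and glists
 * and updates the firmware queue configuration.
 */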
1061 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
1062 {
1063 	struct lio *lio = GET_LIO(netdev);
1064 	struct octeon_device *oct = lio->oct_dev;
1065 	int i, queue_count_update = 0;
1066 	struct napi_struct *napi, *n;
1067 	int ret;
1068 
1069 	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
1070 
1071 	if (wait_for_pending_requests(oct))
1072 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1073 
1074 	if (lio_wait_for_instr_fetch(oct))
1075 		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1076 
1077 	if (octeon_set_io_queues_off(oct)) {
1078 		dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
1079 		return -1;
1080 	}
1081 
1082 	/* Disable the input and output queues now. No more packets will
1083 	 * arrive from Octeon.
1084 	 */
1085 	oct->fn_list.disable_io_queues(oct);
1086 	/* Delete NAPI */
1087 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1088 		netif_napi_del(napi);
1089 
1090 	if (num_qs != oct->num_iqs) {
1091 		ret = netif_set_real_num_rx_queues(netdev, num_qs);
1092 		if (ret) {
1093 			dev_err(&oct->pci_dev->dev,
1094 				"Setting real number rx failed\n");
1095 			return ret;
1096 		}
1097 
1098 		ret = netif_set_real_num_tx_queues(netdev, num_qs);
1099 		if (ret) {
1100 			dev_err(&oct->pci_dev->dev,
1101 				"Setting real number tx failed\n");
1102 			return ret;
1103 		}
1104 
1105 		/* The value of queue_count_update decides whether it is the
1106 		 * queue count or the descriptor count that is being
1107 		 * re-configured.
1108 		 */
1109 		queue_count_update = 1;
1110 	}
1111 
1112 	/* Re-configuration of queues can happen in two scenarios: SRIOV enabled
1113 	 * and SRIOV disabled. A few steps, such as recreating queue zero and
1114 	 * resetting glists and IRQs, are required in both cases. For the latter,
1115 	 * additional steps, like updating sriov_info for the octeon device, apply.
1116 	 */
1117 	if (queue_count_update) {
1118 		lio_delete_glists(lio);
1119 
1120 		/* Delete the mbox for a PF with SRIOV disabled, because
1121 		 * sriov_info is about to change.
1122 		 */
1123 		if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
1124 			oct->fn_list.free_mbox(oct);
1125 	}
1126 
1127 	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1128 		if (!(oct->io_qmask.oq & BIT_ULL(i)))
1129 			continue;
1130 		octeon_delete_droq(oct, i);
1131 	}
1132 
1133 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1134 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
1135 			continue;
1136 		octeon_delete_instr_queue(oct, i);
1137 	}
1138 
1139 	if (queue_count_update) {
1140 		/* For PF re-configure sriov related information */
1141 		if ((OCTEON_CN23XX_PF(oct)) &&
1142 		    !oct->sriov_info.sriov_enabled) {
1143 			oct->sriov_info.num_pf_rings = num_qs;
1144 			if (cn23xx_sriov_config(oct)) {
1145 				dev_err(&oct->pci_dev->dev,
1146 					"Queue reset aborted: SRIOV config failed\n");
1147 				return -1;
1148 			}
1149 
1150 			num_qs = oct->sriov_info.num_pf_rings;
1151 		}
1152 	}
1153 
1154 	if (oct->fn_list.setup_device_regs(oct)) {
1155 		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
1156 		return -1;
1157 	}
1158 
1159 	/* The following steps are needed only for queue count
1160 	 * re-configuration, not for descriptor count re-configuration.
1161 	 */
1162 	if (queue_count_update) {
1163 		if (octeon_setup_instr_queues(oct))
1164 			return -1;
1165 
1166 		if (octeon_setup_output_queues(oct))
1167 			return -1;
1168 
1169 		/* Recreating mbox for PF that is SRIOV disabled */
1170 		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1171 			if (oct->fn_list.setup_mbox(oct)) {
1172 				dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
1173 				return -1;
1174 			}
1175 		}
1176 
1177 		/* Deleting and recreating IRQs whether the interface is SRIOV
1178 		 * enabled or disabled.
1179 		 */
1180 		if (lio_irq_reallocate_irqs(oct, num_qs)) {
1181 			dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
1182 			return -1;
1183 		}
1184 
1185 		/* Enable the input and output queues for this Octeon device */
1186 		if (oct->fn_list.enable_io_queues(oct)) {
1187 			dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
1188 			return -1;
1189 		}
1190 
1191 		for (i = 0; i < oct->num_oqs; i++)
1192 			writel(oct->droq[i]->max_count,
1193 			       oct->droq[i]->pkts_credit_reg);
1194 
1195 		/* Inform the firmware of the new queue count. This is required
1196 		 * for the firmware to allocate more queues than were present
1197 		 * at load time.
1198 		 */
1199 		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1200 			if (lio_23xx_reconfigure_queue_count(lio))
1201 				return -1;
1202 		}
1203 	}
1204 
1205 	/* Once firmware is aware of the new value, queues can be recreated */
1206 	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
1207 		dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
1208 		return -1;
1209 	}
1210 
1211 	if (queue_count_update) {
1212 		if (lio_setup_glists(oct, lio, num_qs)) {
1213 			dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
1214 			return -1;
1215 		}
1216 
1217 		/* Send the firmware the new number of queues if the
1218 		 * interface is a VF or a PF that is SRIOV enabled.
1219 		 */
1220 		if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
1221 			if (lio_send_queue_count_update(netdev, num_qs))
1222 				return -1;
1223 	}
1224 
1225 	return 0;
1226 }
1227 
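/* ethtool set_ringparam handler, e.g. "ethtool -G ethX rx 2048 tx 1024"
 * (CN23XX only): clamp the requested descriptor counts, then reset the
 * queues; on failure the old counts are restored.
 */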
1228 static int lio_ethtool_set_ringparam(struct net_device *netdev,
1229 				     struct ethtool_ringparam *ering)
1230 {
1231 	u32 rx_count, tx_count, rx_count_old, tx_count_old;
1232 	struct lio *lio = GET_LIO(netdev);
1233 	struct octeon_device *oct = lio->oct_dev;
1234 	int stopped = 0;
1235 
1236 	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
1237 		return -EINVAL;
1238 
1239 	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
1240 		return -EINVAL;
1241 
1242 	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
1243 			   CN23XX_MAX_OQ_DESCRIPTORS);
1244 	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
1245 			   CN23XX_MAX_IQ_DESCRIPTORS);
1246 
1247 	rx_count_old = oct->droq[0]->max_count;
1248 	tx_count_old = oct->instr_queue[0]->max_count;
1249 
1250 	if (rx_count == rx_count_old && tx_count == tx_count_old)
1251 		return 0;
1252 
1253 	ifstate_set(lio, LIO_IFSTATE_RESETTING);
1254 
1255 	if (netif_running(netdev)) {
1256 		netdev->netdev_ops->ndo_stop(netdev);
1257 		stopped = 1;
1258 	}
1259 
1260 	/* Change RX/TX DESCS  count */
1261 	if (tx_count != tx_count_old)
1262 		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1263 					    tx_count);
1264 	if (rx_count != rx_count_old)
1265 		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1266 					    rx_count);
1267 
1268 	if (lio_reset_queues(netdev, oct->num_iqs))
1269 		goto err_lio_reset_queues;
1270 
1271 	if (stopped)
1272 		netdev->netdev_ops->ndo_open(netdev);
1273 
1274 	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
1275 
1276 	return 0;
1277 
1278 err_lio_reset_queues:
1279 	if (tx_count != tx_count_old)
1280 		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1281 					    tx_count_old);
1282 	if (rx_count != rx_count_old)
1283 		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1284 					    rx_count_old);
1285 	return -EINVAL;
1286 }
1287 
1288 static u32 lio_get_msglevel(struct net_device *netdev)
1289 {
1290 	struct lio *lio = GET_LIO(netdev);
1291 
1292 	return lio->msg_enable;
1293 }
1294 
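/* ethtool msglvl handler for the PF: toggling NETIF_MSG_HW also sends
 * OCTNET_CMD_VERBOSE_ENABLE/DISABLE to the firmware.
 */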
1295 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
1296 {
1297 	struct lio *lio = GET_LIO(netdev);
1298 
1299 	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
1300 		if (msglvl & NETIF_MSG_HW)
1301 			liquidio_set_feature(netdev,
1302 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
1303 		else
1304 			liquidio_set_feature(netdev,
1305 					     OCTNET_CMD_VERBOSE_DISABLE, 0);
1306 	}
1307 
1308 	lio->msg_enable = msglvl;
1309 }
1310 
1311 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
1312 {
1313 	struct lio *lio = GET_LIO(netdev);
1314 
1315 	lio->msg_enable = msglvl;
1316 }
1317 
1318 static void
1319 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1320 {
1321 	/* Note: these drivers do not support autonegotiation,
1322 	 * so just report pause frame support.
1323 	 */
1324 	struct lio *lio = GET_LIO(netdev);
1325 	struct octeon_device *oct = lio->oct_dev;
1326 
1327 	pause->autoneg = 0;
1328 
1329 	pause->tx_pause = oct->tx_pause;
1330 	pause->rx_pause = oct->rx_pause;
1331 }
1332 
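/* ethtool set_pauseparam handler, e.g. "ethtool -A ethX rx on tx on"
 * (CN23XX PF only): pause autonegotiation is not supported, so the
 * rx/tx pause settings are simply forwarded to the firmware.
 */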
1333 static int
1334 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1335 {
1336 	/* Note: these drivers do not support
1337 	 * autonegotiation.
1338 	 */
1339 	struct lio *lio = GET_LIO(netdev);
1340 	struct octeon_device *oct = lio->oct_dev;
1341 	struct octnic_ctrl_pkt nctrl;
1342 	struct oct_link_info *linfo = &lio->linfo;
1343 
1344 	int ret = 0;
1345 
1346 	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
1347 		return -EINVAL;
1348 
1349 	if (linfo->link.s.duplex == 0) {
1350 		/*no flow control for half duplex*/
1351 		if (pause->rx_pause || pause->tx_pause)
1352 			return -EINVAL;
1353 	}
1354 
1355 	/*do not support autoneg of link flow control*/
1356 	if (pause->autoneg == AUTONEG_ENABLE)
1357 		return -EINVAL;
1358 
1359 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1360 
1361 	nctrl.ncmd.u64 = 0;
1362 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
1363 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1364 	nctrl.netpndev = (u64)netdev;
1365 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1366 
1367 	if (pause->rx_pause) {
1368 		/*enable rx pause*/
1369 		nctrl.ncmd.s.param1 = 1;
1370 	} else {
1371 		/*disable rx pause*/
1372 		nctrl.ncmd.s.param1 = 0;
1373 	}
1374 
1375 	if (pause->tx_pause) {
1376 		/*enable tx pause*/
1377 		nctrl.ncmd.s.param2 = 1;
1378 	} else {
1379 		/*disable tx pause*/
1380 		nctrl.ncmd.s.param2 = 0;
1381 	}
1382 
1383 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1384 	if (ret) {
1385 		dev_err(&oct->pci_dev->dev,
1386 			"Failed to set pause parameter, ret=%d\n", ret);
1387 		return -EINVAL;
1388 	}
1389 
1390 	oct->rx_pause = pause->rx_pause;
1391 	oct->tx_pause = pause->tx_pause;
1392 
1393 	return 0;
1394 }
1395 
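/* Fill the PF statistics array in the same order as oct_stats_strings:
 * netdev counters first, then firmware tx/rx stats, MAC stats, and
 * finally per-IQ and per-DROQ counters.
 */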
1396 static void
1397 lio_get_ethtool_stats(struct net_device *netdev,
1398 		      struct ethtool_stats *stats  __attribute__((unused)),
1399 		      u64 *data)
1400 {
1401 	struct lio *lio = GET_LIO(netdev);
1402 	struct octeon_device *oct_dev = lio->oct_dev;
1403 	struct rtnl_link_stats64 lstats;
1404 	int i = 0, j;
1405 
1406 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1407 		return;
1408 
1409 	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1410 	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
1411 	data[i++] = lstats.rx_packets;
1412 	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
1413 	data[i++] = lstats.tx_packets;
1414 	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
1415 	data[i++] = lstats.rx_bytes;
1416 	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1417 	data[i++] = lstats.tx_bytes;
1418 	data[i++] = lstats.rx_errors +
1419 			oct_dev->link_stats.fromwire.fcs_err +
1420 			oct_dev->link_stats.fromwire.jabber_err +
1421 			oct_dev->link_stats.fromwire.l2_err +
1422 			oct_dev->link_stats.fromwire.frame_err;
1423 	data[i++] = lstats.tx_errors;
1424 	/*sum of oct->droq[oq_no]->stats->rx_dropped +
1425 	 *oct->droq[oq_no]->stats->dropped_nodispatch +
1426 	 *oct->droq[oq_no]->stats->dropped_toomany +
1427 	 *oct->droq[oq_no]->stats->dropped_nomem
1428 	 */
1429 	data[i++] = lstats.rx_dropped +
1430 			oct_dev->link_stats.fromwire.fifo_err +
1431 			oct_dev->link_stats.fromwire.dmac_drop +
1432 			oct_dev->link_stats.fromwire.red_drops +
1433 			oct_dev->link_stats.fromwire.fw_err_pko +
1434 			oct_dev->link_stats.fromwire.fw_err_link +
1435 			oct_dev->link_stats.fromwire.fw_err_drop;
1436 	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1437 	data[i++] = lstats.tx_dropped +
1438 			oct_dev->link_stats.fromhost.max_collision_fail +
1439 			oct_dev->link_stats.fromhost.max_deferral_fail +
1440 			oct_dev->link_stats.fromhost.total_collisions +
1441 			oct_dev->link_stats.fromhost.fw_err_pko +
1442 			oct_dev->link_stats.fromhost.fw_err_link +
1443 			oct_dev->link_stats.fromhost.fw_err_drop +
1444 			oct_dev->link_stats.fromhost.fw_err_pki;
1445 
1446 	/* firmware tx stats */
1447 	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
1448 	 *fromhost.fw_total_sent
1449 	 */
1450 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
1451 	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
1452 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
1453 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
1454 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
1455 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
1456 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
1457 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
1458 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
1459 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1460 	 *fw_err_drop
1461 	 */
1462 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
1463 
1464 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
1465 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
1466 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1467 	 *fw_tso_fwd
1468 	 */
1469 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
1470 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1471 	 *fw_err_tso
1472 	 */
1473 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
1474 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1475 	 *fw_tx_vxlan
1476 	 */
1477 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
1478 
1479 	/* Multicast packets sent by this port */
1480 	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1481 	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1482 
1483 	/* mac tx statistics */
1484 	/*CVMX_BGXX_CMRX_TX_STAT5 */
1485 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
1486 	/*CVMX_BGXX_CMRX_TX_STAT4 */
1487 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
1488 	/*CVMX_BGXX_CMRX_TX_STAT15 */
1489 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
1490 	/*CVMX_BGXX_CMRX_TX_STAT14 */
1491 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
1492 	/*CVMX_BGXX_CMRX_TX_STAT17 */
1493 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
1494 	/*CVMX_BGXX_CMRX_TX_STAT0 */
1495 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
1496 	/*CVMX_BGXX_CMRX_TX_STAT3 */
1497 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
1498 	/*CVMX_BGXX_CMRX_TX_STAT2 */
1499 	data[i++] =
1500 		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
1501 	/*CVMX_BGXX_CMRX_TX_STAT0 */
1502 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
1503 	/*CVMX_BGXX_CMRX_TX_STAT1 */
1504 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
1505 	/*CVMX_BGXX_CMRX_TX_STAT16 */
1506 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
1507 	/*CVMX_BGXX_CMRX_TX_STAT6 */
1508 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
1509 
1510 	/* RX firmware stats */
1511 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1512 	 *fw_total_rcvd
1513 	 */
1514 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
1515 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1516 	 *fw_total_fwd
1517 	 */
1518 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
1519 	/* Multicast packets received on this port */
1520 	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1521 	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1522 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
1523 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
1524 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
1525 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
1526 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
1527 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
1528 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1529 	 *fw_err_pko
1530 	 */
1531 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
1532 	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
1533 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
1534 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1535 	 *fromwire.fw_err_drop
1536 	 */
1537 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
1538 
1539 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1540 	 *fromwire.fw_rx_vxlan
1541 	 */
1542 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
1543 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1544 	 *fromwire.fw_rx_vxlan_err
1545 	 */
1546 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
1547 
1548 	/* LRO */
1549 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1550 	 *fw_lro_pkts
1551 	 */
1552 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
1553 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1554 	 *fw_lro_octs
1555 	 */
1556 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
1557 	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
1558 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
1559 	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
1560 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
1561 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1562 	 *fw_lro_aborts_port
1563 	 */
1564 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
1565 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1566 	 *fw_lro_aborts_seq
1567 	 */
1568 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
1569 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1570 	 *fw_lro_aborts_tsval
1571 	 */
1572 	data[i++] =
1573 		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
1574 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1575 	 *fw_lro_aborts_timer
1576 	 */
1577 	data[i++] =
1578 		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
1579 	/* intrmod: packet forward rate */
1580 	/*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
1581 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
1582 
1583 	/* mac: link-level stats */
1584 	/*CVMX_BGXX_CMRX_RX_STAT0 */
1585 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
1586 	/*CVMX_BGXX_CMRX_RX_STAT1 */
1587 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
1588 	/*CVMX_PKI_STATX_STAT5 */
1589 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
1590 	/*CVMX_PKI_STATX_STAT5 */
1591 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
1592 	/*wqe->word2.err_code or wqe->word2.err_level */
1593 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
1594 	/*CVMX_BGXX_CMRX_RX_STAT2 */
1595 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
1596 	/*CVMX_BGXX_CMRX_RX_STAT6 */
1597 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
1598 	/*CVMX_BGXX_CMRX_RX_STAT4 */
1599 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
1600 	/*wqe->word2.err_code or wqe->word2.err_level */
1601 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
1602 	/*lio->link_changes*/
1603 	data[i++] = CVM_CAST64(lio->link_changes);
1604 
1605 	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
1606 		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
1607 			continue;
1608 		/*packets to network port*/
1609 		/*# of packets tx to network */
1610 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1611 		/*# of bytes tx to network */
1612 		data[i++] =
1613 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1614 		/*# of packets dropped */
1615 		data[i++] =
1616 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
1617 		/*# of tx fails due to queue full */
1618 		data[i++] =
1619 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
1620 		/*XXX gather entries sent */
1621 		data[i++] =
1622 			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
1623 
1624 		/*instruction to firmware: data and control */
1625 		/*# of instructions to the queue */
1626 		data[i++] =
1627 			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
1628 		/*# of instructions processed */
1629 		data[i++] = CVM_CAST64(
1630 				oct_dev->instr_queue[j]->stats.instr_processed);
1631 		/*# of instructions could not be processed */
1632 		data[i++] = CVM_CAST64(
1633 				oct_dev->instr_queue[j]->stats.instr_dropped);
1634 		/*bytes sent through the queue */
1635 		data[i++] =
1636 			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
1637 
1638 		/*tso request*/
1639 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1640 		/*vxlan request*/
1641 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1642 		/*txq restart*/
1643 		data[i++] =
1644 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
1645 	}
1646 
1647 	/* RX */
1648 	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
1649 		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
1650 			continue;
1651 
1652 		/*packets sent to TCP/IP network stack */
1653 		/*# of packets to network stack */
1654 		data[i++] =
1655 			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
1656 		/*# of bytes to network stack */
1657 		data[i++] =
1658 			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
1659 		/*# of packets dropped */
1660 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1661 				       oct_dev->droq[j]->stats.dropped_toomany +
1662 				       oct_dev->droq[j]->stats.rx_dropped);
1663 		data[i++] =
1664 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1665 		data[i++] =
1666 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1667 		data[i++] =
1668 			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1669 
1670 		/*control and data path*/
1671 		data[i++] =
1672 			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1673 		data[i++] =
1674 			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1675 		data[i++] =
1676 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1677 
1678 		data[i++] =
1679 			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1680 		data[i++] =
1681 			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1682 	}
1683 }
1684 
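/* VF version of the statistics dump, ordered to match
 * oct_vf_stats_strings and followed by per-IQ and per-DROQ counters.
 */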
1685 static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1686 				     struct ethtool_stats *stats
1687 				     __attribute__((unused)),
1688 				     u64 *data)
1689 {
1690 	struct rtnl_link_stats64 lstats;
1691 	struct lio *lio = GET_LIO(netdev);
1692 	struct octeon_device *oct_dev = lio->oct_dev;
1693 	int i = 0, j, vj;
1694 
1695 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1696 		return;
1697 
1698 	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1699 	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1700 	data[i++] = lstats.rx_packets;
1701 	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
1702 	data[i++] = lstats.tx_packets;
1703 	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1704 	data[i++] = lstats.rx_bytes;
1705 	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1706 	data[i++] = lstats.tx_bytes;
1707 	data[i++] = lstats.rx_errors;
1708 	data[i++] = lstats.tx_errors;
1709 	 /* sum of oct->droq[oq_no]->stats->rx_dropped +
1710 	  * oct->droq[oq_no]->stats->dropped_nodispatch +
1711 	  * oct->droq[oq_no]->stats->dropped_toomany +
1712 	  * oct->droq[oq_no]->stats->dropped_nomem
1713 	  */
1714 	data[i++] = lstats.rx_dropped;
1715 	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1716 	data[i++] = lstats.tx_dropped;
1717 
1718 	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1719 	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1720 	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1721 	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1722 
1723 	/* lio->link_changes */
1724 	data[i++] = CVM_CAST64(lio->link_changes);
1725 
1726 	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
1727 		j = lio->linfo.txpciq[vj].s.q_no;
1728 
1729 		/* packets to network port */
1730 		/* # of packets tx to network */
1731 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1732 		 /* # of bytes tx to network */
1733 		data[i++] = CVM_CAST64(
1734 				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1735 		/* # of packets dropped */
1736 		data[i++] = CVM_CAST64(
1737 				oct_dev->instr_queue[j]->stats.tx_dropped);
1738 		/* # of tx fails due to queue full */
1739 		data[i++] = CVM_CAST64(
1740 				oct_dev->instr_queue[j]->stats.tx_iq_busy);
1741 		/* XXX gather entries sent */
1742 		data[i++] = CVM_CAST64(
1743 				oct_dev->instr_queue[j]->stats.sgentry_sent);
1744 
1745 		/* instruction to firmware: data and control */
1746 		/* # of instructions to the queue */
1747 		data[i++] = CVM_CAST64(
1748 				oct_dev->instr_queue[j]->stats.instr_posted);
1749 		/* # of instructions processed */
1750 		data[i++] =
1751 		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
1752 		/* # of instructions could not be processed */
1753 		data[i++] =
1754 		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1755 		/* bytes sent through the queue */
1756 		data[i++] = CVM_CAST64(
1757 				oct_dev->instr_queue[j]->stats.bytes_sent);
1758 		/* tso request */
1759 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1760 		/* vxlan request */
1761 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1762 		/* txq restart */
1763 		data[i++] = CVM_CAST64(
1764 				oct_dev->instr_queue[j]->stats.tx_restart);
1765 	}
1766 
1767 	/* RX */
1768 	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
1769 		j = lio->linfo.rxpciq[vj].s.q_no;
1770 
1771 		/* packets sent to TCP/IP network stack */
1772 		/* # of packets to network stack */
1773 		data[i++] = CVM_CAST64(
1774 				oct_dev->droq[j]->stats.rx_pkts_received);
1775 		/* # of bytes to network stack */
1776 		data[i++] = CVM_CAST64(
1777 				oct_dev->droq[j]->stats.rx_bytes_received);
1778 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1779 				       oct_dev->droq[j]->stats.dropped_toomany +
1780 				       oct_dev->droq[j]->stats.rx_dropped);
1781 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1782 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1783 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1784 
1785 		/* control and data path */
1786 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1787 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1788 		data[i++] =
1789 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1790 
1791 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1792 		data[i++] =
1793 		    CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1794 	}
1795 }
1796 
1797 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1798 {
1799 	struct octeon_device *oct_dev = lio->oct_dev;
1800 	int i;
1801 
1802 	switch (oct_dev->chip_id) {
1803 	case OCTEON_CN23XX_PF_VID:
1804 	case OCTEON_CN23XX_VF_VID:
1805 		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1806 			sprintf(data, "%s", oct_priv_flags_strings[i]);
1807 			data += ETH_GSTRING_LEN;
1808 		}
1809 		break;
1810 	case OCTEON_CN68XX:
1811 	case OCTEON_CN66XX:
1812 		break;
1813 	default:
1814 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1815 		break;
1816 	}
1817 }
1818 
1819 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1820 {
1821 	struct lio *lio = GET_LIO(netdev);
1822 	struct octeon_device *oct_dev = lio->oct_dev;
1823 	int num_iq_stats, num_oq_stats, i, j;
1824 	int num_stats;
1825 
1826 	switch (stringset) {
1827 	case ETH_SS_STATS:
1828 		num_stats = ARRAY_SIZE(oct_stats_strings);
1829 		for (j = 0; j < num_stats; j++) {
1830 			sprintf(data, "%s", oct_stats_strings[j]);
1831 			data += ETH_GSTRING_LEN;
1832 		}
1833 
1834 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1835 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1836 			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1837 				continue;
1838 			for (j = 0; j < num_iq_stats; j++) {
1839 				sprintf(data, "tx-%d-%s", i,
1840 					oct_iq_stats_strings[j]);
1841 				data += ETH_GSTRING_LEN;
1842 			}
1843 		}
1844 
1845 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1846 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1847 			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1848 				continue;
1849 			for (j = 0; j < num_oq_stats; j++) {
1850 				sprintf(data, "rx-%d-%s", i,
1851 					oct_droq_stats_strings[j]);
1852 				data += ETH_GSTRING_LEN;
1853 			}
1854 		}
1855 		break;
1856 
1857 	case ETH_SS_PRIV_FLAGS:
1858 		lio_get_priv_flags_strings(lio, data);
1859 		break;
1860 	default:
1861 		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1862 		break;
1863 	}
1864 }
1865 
1866 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1867 			       u8 *data)
1868 {
1869 	int num_iq_stats, num_oq_stats, i, j;
1870 	struct lio *lio = GET_LIO(netdev);
1871 	struct octeon_device *oct_dev = lio->oct_dev;
1872 	int num_stats;
1873 
1874 	switch (stringset) {
1875 	case ETH_SS_STATS:
1876 		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1877 		for (j = 0; j < num_stats; j++) {
1878 			sprintf(data, "%s", oct_vf_stats_strings[j]);
1879 			data += ETH_GSTRING_LEN;
1880 		}
1881 
1882 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1883 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1884 			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1885 				continue;
1886 			for (j = 0; j < num_iq_stats; j++) {
1887 				sprintf(data, "tx-%d-%s", i,
1888 					oct_iq_stats_strings[j]);
1889 				data += ETH_GSTRING_LEN;
1890 			}
1891 		}
1892 
1893 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1894 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1895 			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1896 				continue;
1897 			for (j = 0; j < num_oq_stats; j++) {
1898 				sprintf(data, "rx-%d-%s", i,
1899 					oct_droq_stats_strings[j]);
1900 				data += ETH_GSTRING_LEN;
1901 			}
1902 		}
1903 		break;
1904 
1905 	case ETH_SS_PRIV_FLAGS:
1906 		lio_get_priv_flags_strings(lio, data);
1907 		break;
1908 	default:
1909 		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1910 		break;
1911 	}
1912 }
1913 
1914 static int lio_get_priv_flags_ss_count(struct lio *lio)
1915 {
1916 	struct octeon_device *oct_dev = lio->oct_dev;
1917 
1918 	switch (oct_dev->chip_id) {
1919 	case OCTEON_CN23XX_PF_VID:
1920 	case OCTEON_CN23XX_VF_VID:
1921 		return ARRAY_SIZE(oct_priv_flags_strings);
1922 	case OCTEON_CN68XX:
1923 	case OCTEON_CN66XX:
1924 		return -EOPNOTSUPP;
1925 	default:
1926 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1927 		return -EOPNOTSUPP;
1928 	}
1929 }
1930 
1931 static int lio_get_sset_count(struct net_device *netdev, int sset)
1932 {
1933 	struct lio *lio = GET_LIO(netdev);
1934 	struct octeon_device *oct_dev = lio->oct_dev;
1935 
1936 	switch (sset) {
1937 	case ETH_SS_STATS:
1938 		return (ARRAY_SIZE(oct_stats_strings) +
1939 			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1940 			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1941 	case ETH_SS_PRIV_FLAGS:
1942 		return lio_get_priv_flags_ss_count(lio);
1943 	default:
1944 		return -EOPNOTSUPP;
1945 	}
1946 }
1947 
1948 static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1949 {
1950 	struct lio *lio = GET_LIO(netdev);
1951 	struct octeon_device *oct_dev = lio->oct_dev;
1952 
1953 	switch (sset) {
1954 	case ETH_SS_STATS:
1955 		return (ARRAY_SIZE(oct_vf_stats_strings) +
1956 			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1957 			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1958 	case ETH_SS_PRIV_FLAGS:
1959 		return lio_get_priv_flags_ss_count(lio);
1960 	default:
1961 		return -EOPNOTSUPP;
1962 	}
1963 }
1964 
1965 /* Get interrupt moderation parameters */
1966 static int octnet_get_intrmod_cfg(struct lio *lio,
1967 				  struct oct_intrmod_cfg *intr_cfg)
1968 {
1969 	struct octeon_soft_command *sc;
1970 	struct oct_intrmod_resp *resp;
1971 	int retval;
1972 	struct octeon_device *oct_dev = lio->oct_dev;
1973 
1974 	/* Alloc soft command */
1975 	sc = (struct octeon_soft_command *)
1976 		octeon_alloc_soft_command(oct_dev,
1977 					  0,
1978 					  sizeof(struct oct_intrmod_resp), 0);
1979 
1980 	if (!sc)
1981 		return -ENOMEM;
1982 
1983 	resp = (struct oct_intrmod_resp *)sc->virtrptr;
1984 	memset(resp, 0, sizeof(struct oct_intrmod_resp));
1985 
1986 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1987 
1988 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1989 				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
1990 
1991 	init_completion(&sc->complete);
1992 	sc->sc_status = OCTEON_REQUEST_PENDING;
1993 
1994 	retval = octeon_send_soft_command(oct_dev, sc);
1995 	if (retval == IQ_SEND_FAILED) {
1996 		octeon_free_soft_command(oct_dev, sc);
1997 		return -EINVAL;
1998 	}
1999 
2000 	/* Sleep on a wait queue until the condition flag indicates that the
2001 	 * response arrived or the request timed out.
2002 	 */
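	/* Note: if the wait itself fails, the soft command is reclaimed by
	 * the response processing path (caller_is_done is only set on the
	 * success paths below), so it must not be freed or dereferenced
	 * here.
	 */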
2003 	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2004 	if (retval)
2005 		return -ENODEV;
2006 
2007 	if (resp->status) {
2008 		dev_err(&oct_dev->pci_dev->dev,
2009 			"Get interrupt moderation parameters failed\n");
2010 		WRITE_ONCE(sc->caller_is_done, true);
2011 		return -ENODEV;
2012 	}
2013 
2014 	octeon_swap_8B_data((u64 *)&resp->intrmod,
2015 			    (sizeof(struct oct_intrmod_cfg)) / 8);
2016 	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
2017 	WRITE_ONCE(sc->caller_is_done, true);
2018 
2019 	return 0;
2020 }
2021 
2022 /* Configure interrupt moderation parameters */
2023 static int octnet_set_intrmod_cfg(struct lio *lio,
2024 				  struct oct_intrmod_cfg *intr_cfg)
2025 {
2026 	struct octeon_soft_command *sc;
2027 	struct oct_intrmod_cfg *cfg;
2028 	int retval;
2029 	struct octeon_device *oct_dev = lio->oct_dev;
2030 
2031 	/* Alloc soft command */
2032 	sc = (struct octeon_soft_command *)
2033 		octeon_alloc_soft_command(oct_dev,
2034 					  sizeof(struct oct_intrmod_cfg),
2035 					  16, 0);
2036 
2037 	if (!sc)
2038 		return -ENOMEM;
2039 
2040 	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
2041 
2042 	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
2043 	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
2044 
2045 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2046 
2047 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
2048 				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
2049 
2050 	init_completion(&sc->complete);
2051 	sc->sc_status = OCTEON_REQUEST_PENDING;
2052 
2053 	retval = octeon_send_soft_command(oct_dev, sc);
2054 	if (retval == IQ_SEND_FAILED) {
2055 		octeon_free_soft_command(oct_dev, sc);
2056 		return -EINVAL;
2057 	}
2058 
2059 	/* Sleep on a wait queue until the condition flag indicates that the
2060 	 * response arrived or the request timed out.
2061 	 */
2062 	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2063 	if (retval)
2064 		return retval;
2065 
2066 	retval = sc->sc_status;
2067 	if (retval == 0) {
2068 		dev_info(&oct_dev->pci_dev->dev,
2069 			 "Rx-Adaptive Interrupt moderation %s\n",
2070 			 (intr_cfg->rx_enable) ?
2071 			 "enabled" : "disabled");
2072 		WRITE_ONCE(sc->caller_is_done, true);
2073 		return 0;
2074 	}
2075 
2076 	dev_err(&oct_dev->pci_dev->dev,
2077 		"intrmod config failed. Status: %x\n", retval);
2078 	WRITE_ONCE(sc->caller_is_done, true);
2079 	return -ENODEV;
2080 }
2081 
2082 static int lio_get_intr_coalesce(struct net_device *netdev,
2083 				 struct ethtool_coalesce *intr_coal)
2084 {
2085 	struct lio *lio = GET_LIO(netdev);
2086 	struct octeon_device *oct = lio->oct_dev;
2087 	struct octeon_instr_queue *iq;
2088 	struct oct_intrmod_cfg intrmod_cfg;
2089 
2090 	if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
2091 		return -ENODEV;
2092 
2093 	switch (oct->chip_id) {
2094 	case OCTEON_CN23XX_PF_VID:
2095 	case OCTEON_CN23XX_VF_VID: {
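		/* The fixed thresholds below are only meaningful while the
		 * corresponding adaptive (rx/tx) moderation is disabled.
		 */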
2096 		if (!intrmod_cfg.rx_enable) {
2097 			intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
2098 			intr_coal->rx_max_coalesced_frames =
2099 				oct->rx_max_coalesced_frames;
2100 		}
2101 		if (!intrmod_cfg.tx_enable)
2102 			intr_coal->tx_max_coalesced_frames =
2103 				oct->tx_max_coalesced_frames;
2104 		break;
2105 	}
2106 	case OCTEON_CN68XX:
2107 	case OCTEON_CN66XX: {
2108 		struct octeon_cn6xxx *cn6xxx =
2109 			(struct octeon_cn6xxx *)oct->chip;
2110 
2111 		if (!intrmod_cfg.rx_enable) {
2112 			intr_coal->rx_coalesce_usecs =
2113 				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
2114 			intr_coal->rx_max_coalesced_frames =
2115 				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
2116 		}
2117 		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
2118 		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
2119 		break;
2120 	}
2121 	default:
2122 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
2123 		return -EINVAL;
2124 	}
2125 	if (intrmod_cfg.rx_enable) {
2126 		intr_coal->use_adaptive_rx_coalesce =
2127 			intrmod_cfg.rx_enable;
2128 		intr_coal->rate_sample_interval =
2129 			intrmod_cfg.check_intrvl;
2130 		intr_coal->pkt_rate_high =
2131 			intrmod_cfg.maxpkt_ratethr;
2132 		intr_coal->pkt_rate_low =
2133 			intrmod_cfg.minpkt_ratethr;
2134 		intr_coal->rx_max_coalesced_frames_high =
2135 			intrmod_cfg.rx_maxcnt_trigger;
2136 		intr_coal->rx_coalesce_usecs_high =
2137 			intrmod_cfg.rx_maxtmr_trigger;
2138 		intr_coal->rx_coalesce_usecs_low =
2139 			intrmod_cfg.rx_mintmr_trigger;
2140 		intr_coal->rx_max_coalesced_frames_low =
2141 			intrmod_cfg.rx_mincnt_trigger;
2142 	}
2143 	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
2144 	    (intrmod_cfg.tx_enable)) {
2145 		intr_coal->use_adaptive_tx_coalesce =
2146 			intrmod_cfg.tx_enable;
2147 		intr_coal->tx_max_coalesced_frames_high =
2148 			intrmod_cfg.tx_maxcnt_trigger;
2149 		intr_coal->tx_max_coalesced_frames_low =
2150 			intrmod_cfg.tx_mincnt_trigger;
2151 	}
2152 	return 0;
2153 }
2154 
2155 /* Enable/Disable auto interrupt Moderation */
2156 static int oct_cfg_adaptive_intr(struct lio *lio,
2157 				 struct oct_intrmod_cfg *intrmod_cfg,
2158 				 struct ethtool_coalesce *intr_coal)
2159 {
2160 	int ret = 0;
2161 
2162 	if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
2163 		intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
2164 		intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
2165 		intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
2166 	}
2167 	if (intrmod_cfg->rx_enable) {
2168 		intrmod_cfg->rx_maxcnt_trigger =
2169 			intr_coal->rx_max_coalesced_frames_high;
2170 		intrmod_cfg->rx_maxtmr_trigger =
2171 			intr_coal->rx_coalesce_usecs_high;
2172 		intrmod_cfg->rx_mintmr_trigger =
2173 			intr_coal->rx_coalesce_usecs_low;
2174 		intrmod_cfg->rx_mincnt_trigger =
2175 			intr_coal->rx_max_coalesced_frames_low;
2176 	}
2177 	if (intrmod_cfg->tx_enable) {
2178 		intrmod_cfg->tx_maxcnt_trigger =
2179 			intr_coal->tx_max_coalesced_frames_high;
2180 		intrmod_cfg->tx_mincnt_trigger =
2181 			intr_coal->tx_max_coalesced_frames_low;
2182 	}
2183 
2184 	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
2185 
2186 	return ret;
2187 }
2188 
2189 static int
2190 oct_cfg_rx_intrcnt(struct lio *lio,
2191 		   struct oct_intrmod_cfg *intrmod,
2192 		   struct ethtool_coalesce *intr_coal)
2193 {
2194 	struct octeon_device *oct = lio->oct_dev;
2195 	u32 rx_max_coalesced_frames;
2196 
2197 	/* Config Cnt based interrupt values */
2198 	switch (oct->chip_id) {
2199 	case OCTEON_CN68XX:
2200 	case OCTEON_CN66XX: {
2201 		struct octeon_cn6xxx *cn6xxx =
2202 			(struct octeon_cn6xxx *)oct->chip;
2203 
2204 		if (!intr_coal->rx_max_coalesced_frames)
2205 			rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
2206 		else
2207 			rx_max_coalesced_frames =
2208 				intr_coal->rx_max_coalesced_frames;
2209 		octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
2210 				 rx_max_coalesced_frames);
2211 		CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
2212 		break;
2213 	}
2214 	case OCTEON_CN23XX_PF_VID: {
2215 		int q_no;
2216 
2217 		if (!intr_coal->rx_max_coalesced_frames)
2218 			rx_max_coalesced_frames = intrmod->rx_frames;
2219 		else
2220 			rx_max_coalesced_frames =
2221 			    intr_coal->rx_max_coalesced_frames;
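		/* Read-modify-write: keep the time-threshold field in the
		 * upper bits of SLI_OQ_PKT_INT_LEVELS and update only the
		 * packet-count threshold in the low bits.
		 */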
2222 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2223 			q_no += oct->sriov_info.pf_srn;
2224 			octeon_write_csr64(
2225 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2226 			    (octeon_read_csr64(
2227 				 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2228 			     (0x3fffff00000000UL)) |
2229 				(rx_max_coalesced_frames - 1));
2230 			/* consider setting resend bit */
2231 		}
2232 		intrmod->rx_frames = rx_max_coalesced_frames;
2233 		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2234 		break;
2235 	}
2236 	case OCTEON_CN23XX_VF_VID: {
2237 		int q_no;
2238 
2239 		if (!intr_coal->rx_max_coalesced_frames)
2240 			rx_max_coalesced_frames = intrmod->rx_frames;
2241 		else
2242 			rx_max_coalesced_frames =
2243 			    intr_coal->rx_max_coalesced_frames;
2244 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2245 			octeon_write_csr64(
2246 			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2247 			    (octeon_read_csr64(
2248 				 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2249 			     (0x3fffff00000000UL)) |
2250 				(rx_max_coalesced_frames - 1));
2251 			/* consider writing to resend bit here */
2252 		}
2253 		intrmod->rx_frames = rx_max_coalesced_frames;
2254 		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2255 		break;
2256 	}
2257 	default:
2258 		return -EINVAL;
2259 	}
2260 	return 0;
2261 }
2262 
2263 static int oct_cfg_rx_intrtime(struct lio *lio,
2264 			       struct oct_intrmod_cfg *intrmod,
2265 			       struct ethtool_coalesce *intr_coal)
2266 {
2267 	struct octeon_device *oct = lio->oct_dev;
2268 	u32 time_threshold, rx_coalesce_usecs;
2269 
2270 	/* Config Time based interrupt values */
2271 	switch (oct->chip_id) {
2272 	case OCTEON_CN68XX:
2273 	case OCTEON_CN66XX: {
2274 		struct octeon_cn6xxx *cn6xxx =
2275 			(struct octeon_cn6xxx *)oct->chip;
2276 		if (!intr_coal->rx_coalesce_usecs)
2277 			rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
2278 		else
2279 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2280 
2281 		time_threshold = lio_cn6xxx_get_oq_ticks(oct,
2282 							 rx_coalesce_usecs);
2283 		octeon_write_csr(oct,
2284 				 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
2285 				 time_threshold);
2286 
2287 		CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
2288 		break;
2289 	}
2290 	case OCTEON_CN23XX_PF_VID: {
2291 		u64 time_threshold;
2292 		int q_no;
2293 
2294 		if (!intr_coal->rx_coalesce_usecs)
2295 			rx_coalesce_usecs = intrmod->rx_usecs;
2296 		else
2297 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2298 		time_threshold =
2299 		    cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2300 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2301 			q_no += oct->sriov_info.pf_srn;
2302 			octeon_write_csr64(oct,
2303 					   CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2304 					   (intrmod->rx_frames |
2305 					    ((u64)time_threshold << 32)));
2306 			/* consider writing to resend bit here */
2307 		}
2308 		intrmod->rx_usecs = rx_coalesce_usecs;
2309 		oct->rx_coalesce_usecs = rx_coalesce_usecs;
2310 		break;
2311 	}
2312 	case OCTEON_CN23XX_VF_VID: {
2313 		u64 time_threshold;
2314 		int q_no;
2315 
2316 		if (!intr_coal->rx_coalesce_usecs)
2317 			rx_coalesce_usecs = intrmod->rx_usecs;
2318 		else
2319 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2320 
2321 		time_threshold =
2322 		    cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2323 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2324 			octeon_write_csr64(
2325 				oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2326 				(intrmod->rx_frames |
2327 				 ((u64)time_threshold << 32)));
2328 			/* consider setting resend bit */
2329 		}
2330 		intrmod->rx_usecs = rx_coalesce_usecs;
2331 		oct->rx_coalesce_usecs = rx_coalesce_usecs;
2332 		break;
2333 	}
2334 	default:
2335 		return -EINVAL;
2336 	}
2337 
2338 	return 0;
2339 }
2340 
2341 static int
2342 oct_cfg_tx_intrcnt(struct lio *lio,
2343 		   struct oct_intrmod_cfg *intrmod,
2344 		   struct ethtool_coalesce *intr_coal)
2345 {
2346 	struct octeon_device *oct = lio->oct_dev;
2347 	u32 iq_intr_pkt;
2348 	void __iomem *inst_cnt_reg;
2349 	u64 val;
2350 
2351 	/* Config Cnt based interrupt values */
2352 	switch (oct->chip_id) {
2353 	case OCTEON_CN68XX:
2354 	case OCTEON_CN66XX:
2355 		break;
2356 	case OCTEON_CN23XX_VF_VID:
2357 	case OCTEON_CN23XX_PF_VID: {
2358 		int q_no;
2359 
2360 		if (!intr_coal->tx_max_coalesced_frames)
2361 			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
2362 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
2363 		else
2364 			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
2365 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
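		/* Apply the same watermark to every IQ's instruction-count
		 * register with a read-modify-write of the threshold field.
		 */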
2366 		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
2367 			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
2368 			val = readq(inst_cnt_reg);
2369 			/* clear wmark and count; don't want to write count back */
2370 			val = (val & 0xFFFF000000000000ULL) |
2371 			      ((u64)(iq_intr_pkt - 1)
2372 			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
2373 			writeq(val, inst_cnt_reg);
2374 			/* consider setting resend bit */
2375 		}
2376 		intrmod->tx_frames = iq_intr_pkt;
2377 		oct->tx_max_coalesced_frames = iq_intr_pkt;
2378 		break;
2379 	}
2380 	default:
2381 		return -EINVAL;
2382 	}
2383 	return 0;
2384 }
2385 
2386 static int lio_set_intr_coalesce(struct net_device *netdev,
2387 				 struct ethtool_coalesce *intr_coal)
2388 {
2389 	struct lio *lio = GET_LIO(netdev);
2390 	int ret;
2391 	struct octeon_device *oct = lio->oct_dev;
2392 	struct oct_intrmod_cfg intrmod = {0};
2393 	u32 j, q_no;
2394 	int db_max, db_min;
2395 
2396 	switch (oct->chip_id) {
2397 	case OCTEON_CN68XX:
2398 	case OCTEON_CN66XX:
2399 		db_min = CN6XXX_DB_MIN;
2400 		db_max = CN6XXX_DB_MAX;
2401 		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
2402 		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
2403 			for (j = 0; j < lio->linfo.num_txpciq; j++) {
2404 				q_no = lio->linfo.txpciq[j].s.q_no;
2405 				oct->instr_queue[q_no]->fill_threshold =
2406 					intr_coal->tx_max_coalesced_frames;
2407 			}
2408 		} else {
2409 			dev_err(&oct->pci_dev->dev,
2410 				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
2411 				intr_coal->tx_max_coalesced_frames,
2412 				db_min, db_max);
2413 			return -EINVAL;
2414 		}
2415 		break;
2416 	case OCTEON_CN23XX_PF_VID:
2417 	case OCTEON_CN23XX_VF_VID:
2418 		break;
2419 	default:
2420 		return -EINVAL;
2421 	}
2422 
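	/* Seed intrmod with the static config defaults; the helpers below
	 * replace them with any user-supplied values.
	 */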
2423 	intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
2424 	intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
2425 	intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2426 	intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2427 	intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2428 
2429 	ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
2430 
2431 	if (!intr_coal->use_adaptive_rx_coalesce) {
2432 		ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
2433 		if (ret)
2434 			goto ret_intrmod;
2435 
2436 		ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
2437 		if (ret)
2438 			goto ret_intrmod;
2439 	} else {
2440 		oct->rx_coalesce_usecs =
2441 			CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2442 		oct->rx_max_coalesced_frames =
2443 			CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2444 	}
2445 
2446 	if (!intr_coal->use_adaptive_tx_coalesce) {
2447 		ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2448 		if (ret)
2449 			goto ret_intrmod;
2450 	} else {
2451 		oct->tx_max_coalesced_frames =
2452 			CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2453 	}
2454 
2455 	return 0;
2456 ret_intrmod:
2457 	return ret;
2458 }
2459 
2460 static int lio_get_ts_info(struct net_device *netdev,
2461 			   struct ethtool_ts_info *info)
2462 {
2463 	struct lio *lio = GET_LIO(netdev);
2464 
2465 	info->so_timestamping =
2466 #ifdef PTP_HARDWARE_TIMESTAMPING
2467 		SOF_TIMESTAMPING_TX_HARDWARE |
2468 		SOF_TIMESTAMPING_RX_HARDWARE |
2469 		SOF_TIMESTAMPING_RAW_HARDWARE |
2470 		SOF_TIMESTAMPING_TX_SOFTWARE |
2471 #endif
2472 		SOF_TIMESTAMPING_RX_SOFTWARE |
2473 		SOF_TIMESTAMPING_SOFTWARE;
2474 
2475 	if (lio->ptp_clock)
2476 		info->phc_index = ptp_clock_index(lio->ptp_clock);
2477 	else
2478 		info->phc_index = -1;
2479 
2480 #ifdef PTP_HARDWARE_TIMESTAMPING
2481 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2482 
2483 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2484 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2485 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2486 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2487 #endif
2488 
2489 	return 0;
2490 }
2491 
2492 /* Return register dump len. */
2493 static int lio_get_regs_len(struct net_device *dev)
2494 {
2495 	struct lio *lio = GET_LIO(dev);
2496 	struct octeon_device *oct = lio->oct_dev;
2497 
2498 	switch (oct->chip_id) {
2499 	case OCTEON_CN23XX_PF_VID:
2500 		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2501 	case OCTEON_CN23XX_VF_VID:
2502 		return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2503 	default:
2504 		return OCT_ETHTOOL_REGDUMP_LEN;
2505 	}
2506 }
2507 
2508 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2509 {
2510 	u32 reg;
2511 	u8 pf_num = oct->pf_num;
2512 	int len = 0;
2513 	int i;
2514 
2515 	/* PCI  Window Registers */
2516 
2517 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2518 
2519 	/*0x29030 or 0x29040*/
2520 	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2521 	len += sprintf(s + len,
2522 		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2523 		       reg, oct->pcie_port, oct->pf_num,
2524 		       (u64)octeon_read_csr64(oct, reg));
2525 
2526 	/*0x27080 or 0x27090*/
2527 	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2528 	len +=
2529 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2530 		    reg, oct->pcie_port, oct->pf_num,
2531 		    (u64)octeon_read_csr64(oct, reg));
2532 
2533 	/*0x27000 or 0x27010*/
2534 	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2535 	len +=
2536 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2537 		    reg, oct->pcie_port, oct->pf_num,
2538 		    (u64)octeon_read_csr64(oct, reg));
2539 
2540 	/*0x29120*/
2541 	reg = 0x29120;
2542 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2543 		       (u64)octeon_read_csr64(oct, reg));
2544 
2545 	/*0x27300*/
2546 	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2547 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2548 	len += sprintf(
2549 	    s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2550 	    oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2551 
2552 	/*0x27200*/
2553 	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2554 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2555 	len += sprintf(s + len,
2556 		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2557 		       reg, oct->pcie_port, oct->pf_num,
2558 		       (u64)octeon_read_csr64(oct, reg));
2559 
2560 	/*0x29130*/
2561 	reg = CN23XX_SLI_PKT_CNT_INT;
2562 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2563 		       (u64)octeon_read_csr64(oct, reg));
2564 
2565 	/*0x29140*/
2566 	reg = CN23XX_SLI_PKT_TIME_INT;
2567 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2568 		       (u64)octeon_read_csr64(oct, reg));
2569 
2570 	/*0x29160*/
2571 	reg = 0x29160;
2572 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2573 		       (u64)octeon_read_csr64(oct, reg));
2574 
2575 	/*0x29180*/
2576 	reg = CN23XX_SLI_OQ_WMARK;
2577 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2578 		       reg, (u64)octeon_read_csr64(oct, reg));
2579 
2580 	/*0x291E0*/
2581 	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2582 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2583 		       (u64)octeon_read_csr64(oct, reg));
2584 
2585 	/*0x29210*/
2586 	reg = CN23XX_SLI_GBL_CONTROL;
2587 	len += sprintf(s + len,
2588 		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2589 		       (u64)octeon_read_csr64(oct, reg));
2590 
2591 	/*0x29220*/
2592 	reg = 0x29220;
2593 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2594 		       reg, (u64)octeon_read_csr64(oct, reg));
2595 
2596 	/*PF only*/
2597 	if (pf_num == 0) {
2598 		/*0x29260*/
2599 		reg = CN23XX_SLI_OUT_BP_EN_W1S;
2600 		len += sprintf(s + len,
2601 			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
2602 			       reg, (u64)octeon_read_csr64(oct, reg));
2603 	} else if (pf_num == 1) {
2604 		/*0x29270*/
2605 		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2606 		len += sprintf(s + len,
2607 			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2608 			       reg, (u64)octeon_read_csr64(oct, reg));
2609 	}
2610 
2611 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2612 		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2613 		len +=
2614 		    sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2615 			    reg, i, (u64)octeon_read_csr64(oct, reg));
2616 	}
2617 
2618 	/*0x10040*/
2619 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2620 		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2621 		len += sprintf(s + len,
2622 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2623 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2624 	}
2625 
2626 	/*0x10080*/
2627 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2628 		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2629 		len += sprintf(s + len,
2630 			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2631 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2632 	}
2633 
2634 	/*0x10090*/
2635 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2636 		reg = CN23XX_SLI_OQ_SIZE(i);
2637 		len += sprintf(
2638 		    s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2639 		    reg, i, (u64)octeon_read_csr64(oct, reg));
2640 	}
2641 
2642 	/*0x10050*/
2643 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2644 		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2645 		len += sprintf(
2646 			s + len,
2647 			"\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2648 			reg, i, (u64)octeon_read_csr64(oct, reg));
2649 	}
2650 
2651 	/*0x10070*/
2652 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2653 		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2654 		len += sprintf(s + len,
2655 			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2656 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2657 	}
2658 
2659 	/*0x100a0*/
2660 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2661 		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2662 		len += sprintf(s + len,
2663 			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2664 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2665 	}
2666 
2667 	/*0x100b0*/
2668 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2669 		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2670 		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2671 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2672 	}
2673 
2674 	/*0x100c0*/
2675 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2676 		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2677 		len += sprintf(s + len,
2678 			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2679 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2680 	}
2681 
2682 	/*0x10000*/
2683 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2684 		reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2685 		len += sprintf(s + len,
2686 			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2687 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2688 	}
2689 
2690 	/*0x10010*/
2691 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2692 		reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2693 		len += sprintf(s + len,
2694 			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2695 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2696 	}
2697 
2698 	/*0x10020*/
2699 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2700 		reg = CN23XX_SLI_IQ_DOORBELL(i);
2701 		len += sprintf(s + len,
2702 			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2703 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2704 	}
2705 
2706 	/*0x10030*/
2707 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2708 		reg = CN23XX_SLI_IQ_SIZE(i);
2709 		len += sprintf(s + len,
2710 			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2711 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2712 	}
2724 
2725 	return len;
2726 }
2727 
2728 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2729 {
2730 	int len = 0;
2731 	u32 reg;
2732 	int i;
2733 
2734 	/* PCI  Window Registers */
2735 
2736 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2737 
2738 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2739 		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2740 		len += sprintf(s + len,
2741 			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2742 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2743 	}
2744 
2745 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2746 		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2747 		len += sprintf(s + len,
2748 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2749 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2750 	}
2751 
2752 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2753 		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2754 		len += sprintf(s + len,
2755 			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2756 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2757 	}
2758 
2759 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2760 		reg = CN23XX_VF_SLI_OQ_SIZE(i);
2761 		len += sprintf(s + len,
2762 			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2763 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2764 	}
2765 
2766 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2767 		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2768 		len += sprintf(s + len,
2769 			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2770 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2771 	}
2772 
2773 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2774 		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2775 		len += sprintf(s + len,
2776 			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2777 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2778 	}
2779 
2780 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2781 		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2782 		len += sprintf(s + len,
2783 			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2784 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2785 	}
2786 
2787 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2788 		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2789 		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2790 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2791 	}
2792 
2793 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2794 		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2795 		len += sprintf(s + len,
2796 			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2797 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2798 	}
2799 
2800 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2801 		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2802 		len += sprintf(s + len,
2803 			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2804 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2805 	}
2806 
2807 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2808 		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2809 		len += sprintf(s + len,
2810 			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2811 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2812 	}
2813 
2814 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2815 		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2816 		len += sprintf(s + len,
2817 			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2818 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2819 	}
2820 
2821 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2822 		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2823 		len += sprintf(s + len,
2824 			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2825 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2826 	}
2827 
2828 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2829 		reg = CN23XX_VF_SLI_IQ_SIZE(i);
2830 		len += sprintf(s + len,
2831 			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2832 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2833 	}
2834 
2835 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2836 		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2837 		len += sprintf(s + len,
2838 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2839 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2840 	}
2841 
2842 	return len;
2843 }
2844 
2845 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2846 {
2847 	u32 reg;
2848 	int i, len = 0;
2849 
2850 	/* PCI  Window Registers */
2851 
2852 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2853 	reg = CN6XXX_WIN_WR_ADDR_LO;
2854 	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2855 		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2856 	reg = CN6XXX_WIN_WR_ADDR_HI;
2857 	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2858 		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2859 	reg = CN6XXX_WIN_RD_ADDR_LO;
2860 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2861 		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2862 	reg = CN6XXX_WIN_RD_ADDR_HI;
2863 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2864 		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2865 	reg = CN6XXX_WIN_WR_DATA_LO;
2866 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2867 		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2868 	reg = CN6XXX_WIN_WR_DATA_HI;
2869 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2870 		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2871 	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2872 		       CN6XXX_WIN_WR_MASK_REG,
2873 		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2874 
2875 	/* PCI  Interrupt Register */
2876 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2877 		       CN6XXX_SLI_INT_ENB64_PORT0,
2878 		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT0));
2879 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2880 		       CN6XXX_SLI_INT_ENB64_PORT1,
2881 		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2882 	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2883 		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2884 
2885 	/* PCI  Output queue registers */
2886 	for (i = 0; i < oct->num_oqs; i++) {
2887 		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2888 		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2889 			       reg, i, octeon_read_csr(oct, reg));
2890 		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2891 		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2892 			       reg, i, octeon_read_csr(oct, reg));
2893 	}
2894 	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2895 	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2896 		       reg, octeon_read_csr(oct, reg));
2897 	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2898 	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2899 		       reg, octeon_read_csr(oct, reg));
2900 
2901 	/* PCI  Input queue registers */
2902 	for (i = 0; i <= 3; i++) {
2905 		reg = CN6XXX_SLI_IQ_DOORBELL(i);
2906 		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2907 			       reg, i, octeon_read_csr(oct, reg));
2908 		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2909 		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2910 			       reg, i, octeon_read_csr(oct, reg));
2911 	}
2912 
2913 	/* PCI  DMA registers */
2914 
2915 	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2916 		       CN6XXX_DMA_CNT(0),
2917 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2918 	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2919 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2920 		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2921 	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2922 	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2923 		       CN6XXX_DMA_TIME_INT_LEVEL(0),
2924 		       octeon_read_csr(oct, reg));
2925 
2926 	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2927 		       CN6XXX_DMA_CNT(1),
2928 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2929 	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2930 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2931 		       CN6XXX_DMA_PKT_INT_LEVEL(1),
2932 		       octeon_read_csr(oct, reg));
2933 	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
2934 	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2935 		       CN6XXX_DMA_TIME_INT_LEVEL(1),
2936 		       octeon_read_csr(oct, reg));
2937 
2938 	/* PCI  Index registers */
2939 
2940 	len += sprintf(s + len, "\n");
2941 
2942 	for (i = 0; i < 16; i++) {
2943 		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2944 		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2945 			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2946 	}
2947 
2948 	return len;
2949 }
2950 
2951 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2952 {
2953 	u32 val;
2954 	int i, len = 0;
2955 
2956 	/* PCI CONFIG Registers */
2957 
2958 	len += sprintf(s + len,
2959 		       "\n\t Octeon Config space Registers\n\n");
2960 
2961 	for (i = 0; i <= 13; i++) {
2962 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2963 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2964 			       (i * 4), i, val);
2965 	}
2966 
2967 	for (i = 30; i <= 34; i++) {
2968 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2969 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2970 			       (i * 4), i, val);
2971 	}
2972 
2973 	return len;
2974 }
2975 
2976 /* Return register dump to user app. */
2977 static void lio_get_regs(struct net_device *dev,
2978 			 struct ethtool_regs *regs, void *regbuf)
2979 {
2980 	struct lio *lio = GET_LIO(dev);
2981 	int len = 0;
2982 	struct octeon_device *oct = lio->oct_dev;
2983 
2984 	regs->version = OCT_ETHTOOL_REGSVER;
2985 
2986 	switch (oct->chip_id) {
2987 	case OCTEON_CN23XX_PF_VID:
2988 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2989 		len += cn23xx_read_csr_reg(regbuf + len, oct);
2990 		break;
2991 	case OCTEON_CN23XX_VF_VID:
2992 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
2993 		len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
2994 		break;
2995 	case OCTEON_CN68XX:
2996 	case OCTEON_CN66XX:
2997 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
2998 		len += cn6xxx_read_csr_reg(regbuf + len, oct);
2999 		len += cn6xxx_read_config_reg(regbuf + len, oct);
3000 		break;
3001 	default:
3002 		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
3003 			__func__, oct->chip_id);
3004 	}
3005 }
3006 
3007 static u32 lio_get_priv_flags(struct net_device *netdev)
3008 {
3009 	struct lio *lio = GET_LIO(netdev);
3010 
3011 	return lio->oct_dev->priv_flags;
3012 }
3013 
3014 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
3015 {
3016 	struct lio *lio = GET_LIO(netdev);
3017 	bool intr_by_tx_bytes = !!(flags & BIT(OCT_PRIV_FLAG_TX_BYTES));
3018 
3019 	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
3020 			  intr_by_tx_bytes);
3021 	return 0;
3022 }
3023 
3024 static const struct ethtool_ops lio_ethtool_ops = {
3025 	.get_link_ksettings	= lio_get_link_ksettings,
3026 	.set_link_ksettings	= lio_set_link_ksettings,
3027 	.get_link		= ethtool_op_get_link,
3028 	.get_drvinfo		= lio_get_drvinfo,
3029 	.get_ringparam		= lio_ethtool_get_ringparam,
3030 	.set_ringparam		= lio_ethtool_set_ringparam,
3031 	.get_channels		= lio_ethtool_get_channels,
3032 	.set_channels		= lio_ethtool_set_channels,
3033 	.set_phys_id		= lio_set_phys_id,
3034 	.get_eeprom_len		= lio_get_eeprom_len,
3035 	.get_eeprom		= lio_get_eeprom,
3036 	.get_strings		= lio_get_strings,
3037 	.get_ethtool_stats	= lio_get_ethtool_stats,
3038 	.get_pauseparam		= lio_get_pauseparam,
3039 	.set_pauseparam		= lio_set_pauseparam,
3040 	.get_regs_len		= lio_get_regs_len,
3041 	.get_regs		= lio_get_regs,
3042 	.get_msglevel		= lio_get_msglevel,
3043 	.set_msglevel		= lio_set_msglevel,
3044 	.get_sset_count		= lio_get_sset_count,
3045 	.get_coalesce		= lio_get_intr_coalesce,
3046 	.set_coalesce		= lio_set_intr_coalesce,
3047 	.get_priv_flags		= lio_get_priv_flags,
3048 	.set_priv_flags		= lio_set_priv_flags,
3049 	.get_ts_info		= lio_get_ts_info,
3050 };
3051 
3052 static const struct ethtool_ops lio_vf_ethtool_ops = {
3053 	.get_link_ksettings	= lio_get_link_ksettings,
3054 	.get_link		= ethtool_op_get_link,
3055 	.get_drvinfo		= lio_get_vf_drvinfo,
3056 	.get_ringparam		= lio_ethtool_get_ringparam,
3057 	.set_ringparam          = lio_ethtool_set_ringparam,
3058 	.get_channels		= lio_ethtool_get_channels,
3059 	.set_channels		= lio_ethtool_set_channels,
3060 	.get_strings		= lio_vf_get_strings,
3061 	.get_ethtool_stats	= lio_vf_get_ethtool_stats,
3062 	.get_regs_len		= lio_get_regs_len,
3063 	.get_regs		= lio_get_regs,
3064 	.get_msglevel		= lio_get_msglevel,
3065 	.set_msglevel		= lio_vf_set_msglevel,
3066 	.get_sset_count		= lio_vf_get_sset_count,
3067 	.get_coalesce		= lio_get_intr_coalesce,
3068 	.set_coalesce		= lio_set_intr_coalesce,
3069 	.get_priv_flags		= lio_get_priv_flags,
3070 	.set_priv_flags		= lio_set_priv_flags,
3071 	.get_ts_info		= lio_get_ts_info,
3072 };
3073 
3074 void liquidio_set_ethtool_ops(struct net_device *netdev)
3075 {
3076 	struct lio *lio = GET_LIO(netdev);
3077 	struct octeon_device *oct = lio->oct_dev;
3078 
3079 	if (OCTEON_CN23XX_VF(oct))
3080 		netdev->ethtool_ops = &lio_vf_ethtool_ops;
3081 	else
3082 		netdev->ethtool_ops = &lio_ethtool_ops;
3083 }
3084