1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/ethtool.h> 19 #include <linux/netdevice.h> 20 #include <linux/net_tstamp.h> 21 #include <linux/pci.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn66xx_regs.h" 31 #include "cn66xx_device.h" 32 #include "cn23xx_pf_device.h" 33 #include "cn23xx_vf_device.h" 34 35 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); 36 37 struct oct_intrmod_resp { 38 u64 rh; 39 struct oct_intrmod_cfg intrmod; 40 u64 status; 41 }; 42 43 struct oct_mdio_cmd_resp { 44 u64 rh; 45 struct oct_mdio_cmd resp; 46 u64 status; 47 }; 48 49 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp)) 50 51 /* Octeon's interface mode of operation */ 52 enum { 53 INTERFACE_MODE_DISABLED, 54 INTERFACE_MODE_RGMII, 55 INTERFACE_MODE_GMII, 56 INTERFACE_MODE_SPI, 57 INTERFACE_MODE_PCIE, 58 INTERFACE_MODE_XAUI, 59 INTERFACE_MODE_SGMII, 60 INTERFACE_MODE_PICMG, 61 INTERFACE_MODE_NPI, 62 INTERFACE_MODE_LOOP, 63 INTERFACE_MODE_SRIO, 64 INTERFACE_MODE_ILK, 65 INTERFACE_MODE_RXAUI, 66 
INTERFACE_MODE_QSGMII, 67 INTERFACE_MODE_AGL, 68 INTERFACE_MODE_XLAUI, 69 INTERFACE_MODE_XFI, 70 INTERFACE_MODE_10G_KR, 71 INTERFACE_MODE_40G_KR4, 72 INTERFACE_MODE_MIXED, 73 }; 74 75 #define OCT_ETHTOOL_REGDUMP_LEN 4096 76 #define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11) 77 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2) 78 #define OCT_ETHTOOL_REGSVER 1 79 80 /* statistics of PF */ 81 static const char oct_stats_strings[][ETH_GSTRING_LEN] = { 82 "rx_packets", 83 "tx_packets", 84 "rx_bytes", 85 "tx_bytes", 86 "rx_errors", 87 "tx_errors", 88 "rx_dropped", 89 "tx_dropped", 90 91 "tx_total_sent", 92 "tx_total_fwd", 93 "tx_err_pko", 94 "tx_err_pki", 95 "tx_err_link", 96 "tx_err_drop", 97 98 "tx_tso", 99 "tx_tso_packets", 100 "tx_tso_err", 101 "tx_vxlan", 102 103 "tx_mcast", 104 "tx_bcast", 105 106 "mac_tx_total_pkts", 107 "mac_tx_total_bytes", 108 "mac_tx_mcast_pkts", 109 "mac_tx_bcast_pkts", 110 "mac_tx_ctl_packets", 111 "mac_tx_total_collisions", 112 "mac_tx_one_collision", 113 "mac_tx_multi_collision", 114 "mac_tx_max_collision_fail", 115 "mac_tx_max_deferral_fail", 116 "mac_tx_fifo_err", 117 "mac_tx_runts", 118 119 "rx_total_rcvd", 120 "rx_total_fwd", 121 "rx_mcast", 122 "rx_bcast", 123 "rx_jabber_err", 124 "rx_l2_err", 125 "rx_frame_err", 126 "rx_err_pko", 127 "rx_err_link", 128 "rx_err_drop", 129 130 "rx_vxlan", 131 "rx_vxlan_err", 132 133 "rx_lro_pkts", 134 "rx_lro_bytes", 135 "rx_total_lro", 136 137 "rx_lro_aborts", 138 "rx_lro_aborts_port", 139 "rx_lro_aborts_seq", 140 "rx_lro_aborts_tsval", 141 "rx_lro_aborts_timer", 142 "rx_fwd_rate", 143 144 "mac_rx_total_rcvd", 145 "mac_rx_bytes", 146 "mac_rx_total_bcst", 147 "mac_rx_total_mcst", 148 "mac_rx_runts", 149 "mac_rx_ctl_packets", 150 "mac_rx_fifo_err", 151 "mac_rx_dma_drop", 152 "mac_rx_fcs_err", 153 154 "link_state_changes", 155 }; 156 157 /* statistics of VF */ 158 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { 159 "rx_packets", 160 "tx_packets", 161 "rx_bytes", 162 "tx_bytes", 163 
"rx_errors", 164 "tx_errors", 165 "rx_dropped", 166 "tx_dropped", 167 "rx_mcast", 168 "tx_mcast", 169 "rx_bcast", 170 "tx_bcast", 171 "link_state_changes", 172 }; 173 174 /* statistics of host tx queue */ 175 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 176 "packets", 177 "bytes", 178 "dropped", 179 "iq_busy", 180 "sgentry_sent", 181 182 "fw_instr_posted", 183 "fw_instr_processed", 184 "fw_instr_dropped", 185 "fw_bytes_sent", 186 187 "tso", 188 "vxlan", 189 "txq_restart", 190 }; 191 192 /* statistics of host rx queue */ 193 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 194 "packets", 195 "bytes", 196 "dropped", 197 "dropped_nomem", 198 "dropped_toomany", 199 "fw_dropped", 200 "fw_pkts_received", 201 "fw_bytes_received", 202 "fw_dropped_nodispatch", 203 204 "vxlan", 205 "buffer_alloc_failure", 206 }; 207 208 /* LiquidIO driver private flags */ 209 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { 210 }; 211 212 #define OCTNIC_NCMD_AUTONEG_ON 0x1 213 #define OCTNIC_NCMD_PHY_ON 0x2 214 215 static int lio_get_link_ksettings(struct net_device *netdev, 216 struct ethtool_link_ksettings *ecmd) 217 { 218 struct lio *lio = GET_LIO(netdev); 219 struct octeon_device *oct = lio->oct_dev; 220 struct oct_link_info *linfo; 221 222 linfo = &lio->linfo; 223 224 ethtool_link_ksettings_zero_link_mode(ecmd, supported); 225 ethtool_link_ksettings_zero_link_mode(ecmd, advertising); 226 227 switch (linfo->link.s.phy_type) { 228 case LIO_PHY_PORT_TP: 229 ecmd->base.port = PORT_TP; 230 ecmd->base.autoneg = AUTONEG_DISABLE; 231 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); 232 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 233 ethtool_link_ksettings_add_link_mode(ecmd, supported, 234 10000baseT_Full); 235 236 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 237 ethtool_link_ksettings_add_link_mode(ecmd, advertising, 238 10000baseT_Full); 239 240 break; 241 242 case LIO_PHY_PORT_FIBRE: 243 if 
(linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 244 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || 245 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || 246 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 247 dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); 248 ecmd->base.transceiver = XCVR_EXTERNAL; 249 } else { 250 dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", 251 linfo->link.s.if_mode); 252 } 253 254 ecmd->base.port = PORT_FIBRE; 255 ecmd->base.autoneg = AUTONEG_DISABLE; 256 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); 257 258 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 259 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 260 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 261 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 262 if (OCTEON_CN23XX_PF(oct)) { 263 ethtool_link_ksettings_add_link_mode 264 (ecmd, supported, 25000baseSR_Full); 265 ethtool_link_ksettings_add_link_mode 266 (ecmd, supported, 25000baseKR_Full); 267 ethtool_link_ksettings_add_link_mode 268 (ecmd, supported, 25000baseCR_Full); 269 270 if (oct->no_speed_setting == 0) { 271 ethtool_link_ksettings_add_link_mode 272 (ecmd, supported, 273 10000baseSR_Full); 274 ethtool_link_ksettings_add_link_mode 275 (ecmd, supported, 276 10000baseKR_Full); 277 ethtool_link_ksettings_add_link_mode 278 (ecmd, supported, 279 10000baseCR_Full); 280 } 281 282 if (oct->no_speed_setting == 0) { 283 liquidio_get_speed(lio); 284 liquidio_get_fec(lio); 285 } else { 286 oct->speed_setting = 25; 287 } 288 289 if (oct->speed_setting == 10) { 290 ethtool_link_ksettings_add_link_mode 291 (ecmd, advertising, 292 10000baseSR_Full); 293 ethtool_link_ksettings_add_link_mode 294 (ecmd, advertising, 295 10000baseKR_Full); 296 ethtool_link_ksettings_add_link_mode 297 (ecmd, advertising, 298 10000baseCR_Full); 299 } 300 if (oct->speed_setting == 25) { 301 ethtool_link_ksettings_add_link_mode 302 (ecmd, advertising, 303 
25000baseSR_Full); 304 ethtool_link_ksettings_add_link_mode 305 (ecmd, advertising, 306 25000baseKR_Full); 307 ethtool_link_ksettings_add_link_mode 308 (ecmd, advertising, 309 25000baseCR_Full); 310 } 311 312 if (oct->no_speed_setting) 313 break; 314 315 ethtool_link_ksettings_add_link_mode 316 (ecmd, supported, FEC_RS); 317 ethtool_link_ksettings_add_link_mode 318 (ecmd, supported, FEC_NONE); 319 /*FEC_OFF*/ 320 if (oct->props[lio->ifidx].fec == 1) { 321 /* ETHTOOL_FEC_RS */ 322 ethtool_link_ksettings_add_link_mode 323 (ecmd, advertising, FEC_RS); 324 } else { 325 /* ETHTOOL_FEC_OFF */ 326 ethtool_link_ksettings_add_link_mode 327 (ecmd, advertising, FEC_NONE); 328 } 329 } else { /* VF */ 330 if (linfo->link.s.speed == 10000) { 331 ethtool_link_ksettings_add_link_mode 332 (ecmd, supported, 333 10000baseSR_Full); 334 ethtool_link_ksettings_add_link_mode 335 (ecmd, supported, 336 10000baseKR_Full); 337 ethtool_link_ksettings_add_link_mode 338 (ecmd, supported, 339 10000baseCR_Full); 340 341 ethtool_link_ksettings_add_link_mode 342 (ecmd, advertising, 343 10000baseSR_Full); 344 ethtool_link_ksettings_add_link_mode 345 (ecmd, advertising, 346 10000baseKR_Full); 347 ethtool_link_ksettings_add_link_mode 348 (ecmd, advertising, 349 10000baseCR_Full); 350 } 351 352 if (linfo->link.s.speed == 25000) { 353 ethtool_link_ksettings_add_link_mode 354 (ecmd, supported, 355 25000baseSR_Full); 356 ethtool_link_ksettings_add_link_mode 357 (ecmd, supported, 358 25000baseKR_Full); 359 ethtool_link_ksettings_add_link_mode 360 (ecmd, supported, 361 25000baseCR_Full); 362 363 ethtool_link_ksettings_add_link_mode 364 (ecmd, advertising, 365 25000baseSR_Full); 366 ethtool_link_ksettings_add_link_mode 367 (ecmd, advertising, 368 25000baseKR_Full); 369 ethtool_link_ksettings_add_link_mode 370 (ecmd, advertising, 371 25000baseCR_Full); 372 } 373 } 374 } else { 375 ethtool_link_ksettings_add_link_mode(ecmd, supported, 376 10000baseT_Full); 377 ethtool_link_ksettings_add_link_mode(ecmd, 
advertising, 378 10000baseT_Full); 379 } 380 break; 381 } 382 383 if (linfo->link.s.link_up) { 384 ecmd->base.speed = linfo->link.s.speed; 385 ecmd->base.duplex = linfo->link.s.duplex; 386 } else { 387 ecmd->base.speed = SPEED_UNKNOWN; 388 ecmd->base.duplex = DUPLEX_UNKNOWN; 389 } 390 391 return 0; 392 } 393 394 static int lio_set_link_ksettings(struct net_device *netdev, 395 const struct ethtool_link_ksettings *ecmd) 396 { 397 const int speed = ecmd->base.speed; 398 struct lio *lio = GET_LIO(netdev); 399 struct oct_link_info *linfo; 400 struct octeon_device *oct; 401 402 oct = lio->oct_dev; 403 404 linfo = &lio->linfo; 405 406 if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 407 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID)) 408 return -EOPNOTSUPP; 409 410 if (oct->no_speed_setting) { 411 dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n", 412 __func__); 413 return -EOPNOTSUPP; 414 } 415 416 if ((ecmd->base.duplex != DUPLEX_UNKNOWN && 417 ecmd->base.duplex != linfo->link.s.duplex) || 418 ecmd->base.autoneg != AUTONEG_DISABLE || 419 (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 && 420 ecmd->base.speed != SPEED_UNKNOWN)) 421 return -EOPNOTSUPP; 422 423 if ((oct->speed_boot == speed / 1000) && 424 oct->speed_boot == oct->speed_setting) 425 return 0; 426 427 liquidio_set_speed(lio, speed / 1000); 428 429 dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n", 430 oct->speed_setting); 431 432 return 0; 433 } 434 435 static void 436 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 437 { 438 struct lio *lio; 439 struct octeon_device *oct; 440 441 lio = GET_LIO(netdev); 442 oct = lio->oct_dev; 443 444 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 445 strcpy(drvinfo->driver, "liquidio"); 446 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 447 ETHTOOL_FWVERS_LEN); 448 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 449 } 450 451 static void 452 lio_get_vf_drvinfo(struct 
net_device *netdev, struct ethtool_drvinfo *drvinfo) 453 { 454 struct octeon_device *oct; 455 struct lio *lio; 456 457 lio = GET_LIO(netdev); 458 oct = lio->oct_dev; 459 460 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 461 strcpy(drvinfo->driver, "liquidio_vf"); 462 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 463 ETHTOOL_FWVERS_LEN); 464 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 465 } 466 467 static int 468 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) 469 { 470 struct lio *lio = GET_LIO(netdev); 471 struct octeon_device *oct = lio->oct_dev; 472 struct octnic_ctrl_pkt nctrl; 473 int ret = 0; 474 475 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 476 477 nctrl.ncmd.u64 = 0; 478 nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL; 479 nctrl.ncmd.s.param1 = num_queues; 480 nctrl.ncmd.s.param2 = num_queues; 481 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 482 nctrl.netpndev = (u64)netdev; 483 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 484 485 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 486 if (ret) { 487 dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", 488 ret); 489 return -1; 490 } 491 492 return 0; 493 } 494 495 static void 496 lio_ethtool_get_channels(struct net_device *dev, 497 struct ethtool_channels *channel) 498 { 499 struct lio *lio = GET_LIO(dev); 500 struct octeon_device *oct = lio->oct_dev; 501 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; 502 u32 combined_count = 0, max_combined = 0; 503 504 if (OCTEON_CN6XXX(oct)) { 505 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 506 507 max_rx = CFG_GET_OQ_MAX_Q(conf6x); 508 max_tx = CFG_GET_IQ_MAX_Q(conf6x); 509 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); 510 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); 511 } else if (OCTEON_CN23XX_PF(oct)) { 512 if (oct->sriov_info.sriov_enabled) { 513 max_combined = lio->linfo.num_txpciq; 514 } else { 515 struct octeon_config 
*conf23_pf = 516 CHIP_CONF(oct, cn23xx_pf); 517 518 max_combined = CFG_GET_IQ_MAX_Q(conf23_pf); 519 } 520 combined_count = oct->num_iqs; 521 } else if (OCTEON_CN23XX_VF(oct)) { 522 u64 reg_val = 0ULL; 523 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); 524 525 reg_val = octeon_read_csr64(oct, ctrl); 526 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; 527 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; 528 combined_count = oct->num_iqs; 529 } 530 531 channel->max_rx = max_rx; 532 channel->max_tx = max_tx; 533 channel->max_combined = max_combined; 534 channel->rx_count = rx_count; 535 channel->tx_count = tx_count; 536 channel->combined_count = combined_count; 537 } 538 539 static int 540 lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) 541 { 542 struct msix_entry *msix_entries; 543 int num_msix_irqs = 0; 544 int i; 545 546 if (!oct->msix_on) 547 return 0; 548 549 /* Disable the input and output queues now. No more packets will 550 * arrive from Octeon. 551 */ 552 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 553 554 if (oct->msix_on) { 555 if (OCTEON_CN23XX_PF(oct)) 556 num_msix_irqs = oct->num_msix_irqs - 1; 557 else if (OCTEON_CN23XX_VF(oct)) 558 num_msix_irqs = oct->num_msix_irqs; 559 560 msix_entries = (struct msix_entry *)oct->msix_entries; 561 for (i = 0; i < num_msix_irqs; i++) { 562 if (oct->ioq_vector[i].vector) { 563 /* clear the affinity_cpumask */ 564 irq_set_affinity_hint(msix_entries[i].vector, 565 NULL); 566 free_irq(msix_entries[i].vector, 567 &oct->ioq_vector[i]); 568 oct->ioq_vector[i].vector = 0; 569 } 570 } 571 572 /* non-iov vector's argument is oct struct */ 573 if (OCTEON_CN23XX_PF(oct)) 574 free_irq(msix_entries[i].vector, oct); 575 576 pci_disable_msix(oct->pci_dev); 577 kfree(oct->msix_entries); 578 oct->msix_entries = NULL; 579 } 580 581 kfree(oct->irq_name_storage); 582 oct->irq_name_storage = NULL; 583 584 if (octeon_allocate_ioq_vector(oct, num_ioqs)) { 585 dev_err(&oct->pci_dev->dev, "OCTEON: ioq 
vector allocation failed\n"); 586 return -1; 587 } 588 589 if (octeon_setup_interrupt(oct, num_ioqs)) { 590 dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); 591 return -1; 592 } 593 594 /* Enable Octeon device interrupts */ 595 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 596 597 return 0; 598 } 599 600 static int 601 lio_ethtool_set_channels(struct net_device *dev, 602 struct ethtool_channels *channel) 603 { 604 u32 combined_count, max_combined; 605 struct lio *lio = GET_LIO(dev); 606 struct octeon_device *oct = lio->oct_dev; 607 int stopped = 0; 608 609 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) { 610 dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n"); 611 return -EINVAL; 612 } 613 614 if (!channel->combined_count || channel->other_count || 615 channel->rx_count || channel->tx_count) 616 return -EINVAL; 617 618 combined_count = channel->combined_count; 619 620 if (OCTEON_CN23XX_PF(oct)) { 621 if (oct->sriov_info.sriov_enabled) { 622 max_combined = lio->linfo.num_txpciq; 623 } else { 624 struct octeon_config *conf23_pf = 625 CHIP_CONF(oct, 626 cn23xx_pf); 627 628 max_combined = 629 CFG_GET_IQ_MAX_Q(conf23_pf); 630 } 631 } else if (OCTEON_CN23XX_VF(oct)) { 632 u64 reg_val = 0ULL; 633 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); 634 635 reg_val = octeon_read_csr64(oct, ctrl); 636 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; 637 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; 638 } else { 639 return -EINVAL; 640 } 641 642 if (combined_count > max_combined || combined_count < 1) 643 return -EINVAL; 644 645 if (combined_count == oct->num_iqs) 646 return 0; 647 648 ifstate_set(lio, LIO_IFSTATE_RESETTING); 649 650 if (netif_running(dev)) { 651 dev->netdev_ops->ndo_stop(dev); 652 stopped = 1; 653 } 654 655 if (lio_reset_queues(dev, combined_count)) 656 return -EINVAL; 657 658 if (stopped) 659 dev->netdev_ops->ndo_open(dev); 660 661 ifstate_reset(lio, LIO_IFSTATE_RESETTING); 662 663 return 0; 
664 } 665 666 static int lio_get_eeprom_len(struct net_device *netdev) 667 { 668 u8 buf[192]; 669 struct lio *lio = GET_LIO(netdev); 670 struct octeon_device *oct_dev = lio->oct_dev; 671 struct octeon_board_info *board_info; 672 int len; 673 674 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 675 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n", 676 board_info->name, board_info->serial_number, 677 board_info->major, board_info->minor); 678 679 return len; 680 } 681 682 static int 683 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 684 u8 *bytes) 685 { 686 struct lio *lio = GET_LIO(netdev); 687 struct octeon_device *oct_dev = lio->oct_dev; 688 struct octeon_board_info *board_info; 689 690 if (eeprom->offset) 691 return -EINVAL; 692 693 eeprom->magic = oct_dev->pci_dev->vendor; 694 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 695 sprintf((char *)bytes, 696 "boardname:%s serialnum:%s maj:%lld min:%lld\n", 697 board_info->name, board_info->serial_number, 698 board_info->major, board_info->minor); 699 700 return 0; 701 } 702 703 static int octnet_gpio_access(struct net_device *netdev, int addr, int val) 704 { 705 struct lio *lio = GET_LIO(netdev); 706 struct octeon_device *oct = lio->oct_dev; 707 struct octnic_ctrl_pkt nctrl; 708 int ret = 0; 709 710 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 711 712 nctrl.ncmd.u64 = 0; 713 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; 714 nctrl.ncmd.s.param1 = addr; 715 nctrl.ncmd.s.param2 = val; 716 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 717 nctrl.netpndev = (u64)netdev; 718 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 719 720 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 721 if (ret) { 722 dev_err(&oct->pci_dev->dev, 723 "Failed to configure gpio value, ret=%d\n", ret); 724 return -EINVAL; 725 } 726 727 return 0; 728 } 729 730 static int octnet_id_active(struct net_device *netdev, int val) 731 { 732 struct lio *lio = GET_LIO(netdev); 
733 struct octeon_device *oct = lio->oct_dev; 734 struct octnic_ctrl_pkt nctrl; 735 int ret = 0; 736 737 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 738 739 nctrl.ncmd.u64 = 0; 740 nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; 741 nctrl.ncmd.s.param1 = val; 742 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 743 nctrl.netpndev = (u64)netdev; 744 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 745 746 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 747 if (ret) { 748 dev_err(&oct->pci_dev->dev, 749 "Failed to configure gpio value, ret=%d\n", ret); 750 return -EINVAL; 751 } 752 753 return 0; 754 } 755 756 /* This routine provides PHY access routines for 757 * mdio clause45 . 758 */ 759 static int 760 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) 761 { 762 struct octeon_device *oct_dev = lio->oct_dev; 763 struct octeon_soft_command *sc; 764 struct oct_mdio_cmd_resp *mdio_cmd_rsp; 765 struct oct_mdio_cmd *mdio_cmd; 766 int retval = 0; 767 768 sc = (struct octeon_soft_command *) 769 octeon_alloc_soft_command(oct_dev, 770 sizeof(struct oct_mdio_cmd), 771 sizeof(struct oct_mdio_cmd_resp), 0); 772 773 if (!sc) 774 return -ENOMEM; 775 776 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; 777 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; 778 779 mdio_cmd->op = op; 780 mdio_cmd->mdio_addr = loc; 781 if (op) 782 mdio_cmd->value1 = *value; 783 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); 784 785 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 786 787 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 788 0, 0, 0); 789 790 init_completion(&sc->complete); 791 sc->sc_status = OCTEON_REQUEST_PENDING; 792 793 retval = octeon_send_soft_command(oct_dev, sc); 794 if (retval == IQ_SEND_FAILED) { 795 dev_err(&oct_dev->pci_dev->dev, 796 "octnet_mdio45_access instruction failed status: %x\n", 797 retval); 798 octeon_free_soft_command(oct_dev, sc); 799 return -EBUSY; 800 } else { 801 /* Sleep on a wait queue till the 
cond flag indicates that the 802 * response arrived 803 */ 804 retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); 805 if (retval) 806 return retval; 807 808 retval = mdio_cmd_rsp->status; 809 if (retval) { 810 dev_err(&oct_dev->pci_dev->dev, 811 "octnet mdio45 access failed: %x\n", retval); 812 WRITE_ONCE(sc->caller_is_done, true); 813 return -EBUSY; 814 } 815 816 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), 817 sizeof(struct oct_mdio_cmd) / 8); 818 819 if (!op) 820 *value = mdio_cmd_rsp->resp.value1; 821 822 WRITE_ONCE(sc->caller_is_done, true); 823 } 824 825 return retval; 826 } 827 828 static int lio_set_phys_id(struct net_device *netdev, 829 enum ethtool_phys_id_state state) 830 { 831 struct lio *lio = GET_LIO(netdev); 832 struct octeon_device *oct = lio->oct_dev; 833 struct oct_link_info *linfo; 834 int value, ret; 835 u32 cur_ver; 836 837 linfo = &lio->linfo; 838 cur_ver = OCT_FW_VER(oct->fw_info.ver.maj, 839 oct->fw_info.ver.min, 840 oct->fw_info.ver.rev); 841 842 switch (state) { 843 case ETHTOOL_ID_ACTIVE: 844 if (oct->chip_id == OCTEON_CN66XX) { 845 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 846 VITESSE_PHY_GPIO_DRIVEON); 847 return 2; 848 849 } else if (oct->chip_id == OCTEON_CN68XX) { 850 /* Save the current LED settings */ 851 ret = octnet_mdio45_access(lio, 0, 852 LIO68XX_LED_BEACON_ADDR, 853 &lio->phy_beacon_val); 854 if (ret) 855 return ret; 856 857 ret = octnet_mdio45_access(lio, 0, 858 LIO68XX_LED_CTRL_ADDR, 859 &lio->led_ctrl_val); 860 if (ret) 861 return ret; 862 863 /* Configure Beacon values */ 864 value = LIO68XX_LED_BEACON_CFGON; 865 ret = octnet_mdio45_access(lio, 1, 866 LIO68XX_LED_BEACON_ADDR, 867 &value); 868 if (ret) 869 return ret; 870 871 value = LIO68XX_LED_CTRL_CFGON; 872 ret = octnet_mdio45_access(lio, 1, 873 LIO68XX_LED_CTRL_ADDR, 874 &value); 875 if (ret) 876 return ret; 877 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 878 octnet_id_active(netdev, LED_IDENTIFICATION_ON); 879 if (linfo->link.s.phy_type == 
LIO_PHY_PORT_TP && 880 cur_ver > OCT_FW_VER(1, 7, 2)) 881 return 2; 882 else 883 return 0; 884 } else { 885 return -EINVAL; 886 } 887 break; 888 889 case ETHTOOL_ID_ON: 890 if (oct->chip_id == OCTEON_CN23XX_PF_VID && 891 linfo->link.s.phy_type == LIO_PHY_PORT_TP && 892 cur_ver > OCT_FW_VER(1, 7, 2)) 893 octnet_id_active(netdev, LED_IDENTIFICATION_ON); 894 else if (oct->chip_id == OCTEON_CN66XX) 895 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 896 VITESSE_PHY_GPIO_HIGH); 897 else 898 return -EINVAL; 899 900 break; 901 902 case ETHTOOL_ID_OFF: 903 if (oct->chip_id == OCTEON_CN23XX_PF_VID && 904 linfo->link.s.phy_type == LIO_PHY_PORT_TP && 905 cur_ver > OCT_FW_VER(1, 7, 2)) 906 octnet_id_active(netdev, LED_IDENTIFICATION_OFF); 907 else if (oct->chip_id == OCTEON_CN66XX) 908 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 909 VITESSE_PHY_GPIO_LOW); 910 else 911 return -EINVAL; 912 913 break; 914 915 case ETHTOOL_ID_INACTIVE: 916 if (oct->chip_id == OCTEON_CN66XX) { 917 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 918 VITESSE_PHY_GPIO_DRIVEOFF); 919 } else if (oct->chip_id == OCTEON_CN68XX) { 920 /* Restore LED settings */ 921 ret = octnet_mdio45_access(lio, 1, 922 LIO68XX_LED_CTRL_ADDR, 923 &lio->led_ctrl_val); 924 if (ret) 925 return ret; 926 927 ret = octnet_mdio45_access(lio, 1, 928 LIO68XX_LED_BEACON_ADDR, 929 &lio->phy_beacon_val); 930 if (ret) 931 return ret; 932 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 933 octnet_id_active(netdev, LED_IDENTIFICATION_OFF); 934 935 return 0; 936 } else { 937 return -EINVAL; 938 } 939 break; 940 941 default: 942 return -EINVAL; 943 } 944 945 return 0; 946 } 947 948 static void 949 lio_ethtool_get_ringparam(struct net_device *netdev, 950 struct ethtool_ringparam *ering) 951 { 952 struct lio *lio = GET_LIO(netdev); 953 struct octeon_device *oct = lio->oct_dev; 954 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, 955 rx_pending = 0; 956 957 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 958 return; 959 
960 if (OCTEON_CN6XXX(oct)) { 961 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 962 963 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; 964 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; 965 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); 966 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); 967 } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { 968 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; 969 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; 970 rx_pending = oct->droq[0]->max_count; 971 tx_pending = oct->instr_queue[0]->max_count; 972 } 973 974 ering->tx_pending = tx_pending; 975 ering->tx_max_pending = tx_max_pending; 976 ering->rx_pending = rx_pending; 977 ering->rx_max_pending = rx_max_pending; 978 ering->rx_mini_pending = 0; 979 ering->rx_jumbo_pending = 0; 980 ering->rx_mini_max_pending = 0; 981 ering->rx_jumbo_max_pending = 0; 982 } 983 984 static int lio_23xx_reconfigure_queue_count(struct lio *lio) 985 { 986 struct octeon_device *oct = lio->oct_dev; 987 u32 resp_size, data_size; 988 struct liquidio_if_cfg_resp *resp; 989 struct octeon_soft_command *sc; 990 union oct_nic_if_cfg if_cfg; 991 struct lio_version *vdata; 992 u32 ifidx_or_pfnum; 993 int retval; 994 int j; 995 996 resp_size = sizeof(struct liquidio_if_cfg_resp); 997 data_size = sizeof(struct lio_version); 998 sc = (struct octeon_soft_command *) 999 octeon_alloc_soft_command(oct, data_size, 1000 resp_size, 0); 1001 if (!sc) { 1002 dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", 1003 __func__); 1004 return -1; 1005 } 1006 1007 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1008 vdata = (struct lio_version *)sc->virtdptr; 1009 1010 vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1011 vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1012 vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1013 1014 ifidx_or_pfnum = oct->pf_num; 1015 1016 if_cfg.u64 = 0; 1017 if_cfg.s.num_iqueues = 
oct->sriov_info.num_pf_rings; 1018 if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings; 1019 if_cfg.s.base_queue = oct->sriov_info.pf_srn; 1020 if_cfg.s.gmx_port_id = oct->pf_num; 1021 1022 sc->iq_no = 0; 1023 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1024 OPCODE_NIC_QCOUNT_UPDATE, 0, 1025 if_cfg.u64, 0); 1026 1027 init_completion(&sc->complete); 1028 sc->sc_status = OCTEON_REQUEST_PENDING; 1029 1030 retval = octeon_send_soft_command(oct, sc); 1031 if (retval == IQ_SEND_FAILED) { 1032 dev_err(&oct->pci_dev->dev, 1033 "Sending iq/oq config failed status: %x\n", 1034 retval); 1035 octeon_free_soft_command(oct, sc); 1036 return -EIO; 1037 } 1038 1039 retval = wait_for_sc_completion_timeout(oct, sc, 0); 1040 if (retval) 1041 return retval; 1042 1043 retval = resp->status; 1044 if (retval) { 1045 dev_err(&oct->pci_dev->dev, 1046 "iq/oq config failed: %x\n", retval); 1047 WRITE_ONCE(sc->caller_is_done, true); 1048 return -1; 1049 } 1050 1051 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 1052 (sizeof(struct liquidio_if_cfg_info)) >> 3); 1053 1054 lio->ifidx = ifidx_or_pfnum; 1055 lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask); 1056 lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask); 1057 for (j = 0; j < lio->linfo.num_rxpciq; j++) { 1058 lio->linfo.rxpciq[j].u64 = 1059 resp->cfg_info.linfo.rxpciq[j].u64; 1060 } 1061 1062 for (j = 0; j < lio->linfo.num_txpciq; j++) { 1063 lio->linfo.txpciq[j].u64 = 1064 resp->cfg_info.linfo.txpciq[j].u64; 1065 } 1066 1067 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 1068 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 1069 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 1070 lio->txq = lio->linfo.txpciq[0].s.q_no; 1071 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 1072 1073 dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", 1074 lio->linfo.num_rxpciq); 1075 1076 WRITE_ONCE(sc->caller_is_done, true); 1077 1078 return 0; 1079 } 1080 1081 static int lio_reset_queues(struct net_device *netdev, uint32_t 
num_qs) 1082 { 1083 struct lio *lio = GET_LIO(netdev); 1084 struct octeon_device *oct = lio->oct_dev; 1085 int i, queue_count_update = 0; 1086 struct napi_struct *napi, *n; 1087 int ret; 1088 1089 schedule_timeout_uninterruptible(msecs_to_jiffies(100)); 1090 1091 if (wait_for_pending_requests(oct)) 1092 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1093 1094 if (lio_wait_for_instr_fetch(oct)) 1095 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1096 1097 if (octeon_set_io_queues_off(oct)) { 1098 dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n"); 1099 return -1; 1100 } 1101 1102 /* Disable the input and output queues now. No more packets will 1103 * arrive from Octeon. 1104 */ 1105 oct->fn_list.disable_io_queues(oct); 1106 /* Delete NAPI */ 1107 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1108 netif_napi_del(napi); 1109 1110 if (num_qs != oct->num_iqs) { 1111 ret = netif_set_real_num_rx_queues(netdev, num_qs); 1112 if (ret) { 1113 dev_err(&oct->pci_dev->dev, 1114 "Setting real number rx failed\n"); 1115 return ret; 1116 } 1117 1118 ret = netif_set_real_num_tx_queues(netdev, num_qs); 1119 if (ret) { 1120 dev_err(&oct->pci_dev->dev, 1121 "Setting real number tx failed\n"); 1122 return ret; 1123 } 1124 1125 /* The value of queue_count_update decides whether it is the 1126 * queue count or the descriptor count that is being 1127 * re-configured. 1128 */ 1129 queue_count_update = 1; 1130 } 1131 1132 /* Re-configuration of queues can happen in two scenarios, SRIOV enabled 1133 * and SRIOV disabled. Few things like recreating queue zero, resetting 1134 * glists and IRQs are required for both. For the latter, some more 1135 * steps like updating sriov_info for the octeon device need to be done. 1136 */ 1137 if (queue_count_update) { 1138 cleanup_rx_oom_poll_fn(netdev); 1139 1140 lio_delete_glists(lio); 1141 1142 /* Delete mbox for PF which is SRIOV disabled because sriov_info 1143 * will be now changed. 
1144 */ 1145 if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled) 1146 oct->fn_list.free_mbox(oct); 1147 } 1148 1149 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1150 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1151 continue; 1152 octeon_delete_droq(oct, i); 1153 } 1154 1155 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1156 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1157 continue; 1158 octeon_delete_instr_queue(oct, i); 1159 } 1160 1161 if (queue_count_update) { 1162 /* For PF re-configure sriov related information */ 1163 if ((OCTEON_CN23XX_PF(oct)) && 1164 !oct->sriov_info.sriov_enabled) { 1165 oct->sriov_info.num_pf_rings = num_qs; 1166 if (cn23xx_sriov_config(oct)) { 1167 dev_err(&oct->pci_dev->dev, 1168 "Queue reset aborted: SRIOV config failed\n"); 1169 return -1; 1170 } 1171 1172 num_qs = oct->sriov_info.num_pf_rings; 1173 } 1174 } 1175 1176 if (oct->fn_list.setup_device_regs(oct)) { 1177 dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); 1178 return -1; 1179 } 1180 1181 /* The following are needed in case of queue count re-configuration and 1182 * not for descriptor count re-configuration. 1183 */ 1184 if (queue_count_update) { 1185 if (octeon_setup_instr_queues(oct)) 1186 return -1; 1187 1188 if (octeon_setup_output_queues(oct)) 1189 return -1; 1190 1191 /* Recreating mbox for PF that is SRIOV disabled */ 1192 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { 1193 if (oct->fn_list.setup_mbox(oct)) { 1194 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 1195 return -1; 1196 } 1197 } 1198 1199 /* Deleting and recreating IRQs whether the interface is SRIOV 1200 * enabled or disabled. 
1201 */ 1202 if (lio_irq_reallocate_irqs(oct, num_qs)) { 1203 dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n"); 1204 return -1; 1205 } 1206 1207 /* Enable the input and output queues for this Octeon device */ 1208 if (oct->fn_list.enable_io_queues(oct)) { 1209 dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n"); 1210 return -1; 1211 } 1212 1213 for (i = 0; i < oct->num_oqs; i++) 1214 writel(oct->droq[i]->max_count, 1215 oct->droq[i]->pkts_credit_reg); 1216 1217 /* Informing firmware about the new queue count. It is required 1218 * for firmware to allocate more number of queues than those at 1219 * load time. 1220 */ 1221 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { 1222 if (lio_23xx_reconfigure_queue_count(lio)) 1223 return -1; 1224 } 1225 } 1226 1227 /* Once firmware is aware of the new value, queues can be recreated */ 1228 if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { 1229 dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n"); 1230 return -1; 1231 } 1232 1233 if (queue_count_update) { 1234 if (lio_setup_glists(oct, lio, num_qs)) { 1235 dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n"); 1236 return -1; 1237 } 1238 1239 if (setup_rx_oom_poll_fn(netdev)) { 1240 dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n"); 1241 return 1; 1242 } 1243 1244 /* Send firmware the information about new number of queues 1245 * if the interface is a VF or a PF that is SRIOV enabled. 
1246 */ 1247 if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct)) 1248 if (lio_send_queue_count_update(netdev, num_qs)) 1249 return -1; 1250 } 1251 1252 return 0; 1253 } 1254 1255 static int lio_ethtool_set_ringparam(struct net_device *netdev, 1256 struct ethtool_ringparam *ering) 1257 { 1258 u32 rx_count, tx_count, rx_count_old, tx_count_old; 1259 struct lio *lio = GET_LIO(netdev); 1260 struct octeon_device *oct = lio->oct_dev; 1261 int stopped = 0; 1262 1263 if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct)) 1264 return -EINVAL; 1265 1266 if (ering->rx_mini_pending || ering->rx_jumbo_pending) 1267 return -EINVAL; 1268 1269 rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS, 1270 CN23XX_MAX_OQ_DESCRIPTORS); 1271 tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS, 1272 CN23XX_MAX_IQ_DESCRIPTORS); 1273 1274 rx_count_old = oct->droq[0]->max_count; 1275 tx_count_old = oct->instr_queue[0]->max_count; 1276 1277 if (rx_count == rx_count_old && tx_count == tx_count_old) 1278 return 0; 1279 1280 ifstate_set(lio, LIO_IFSTATE_RESETTING); 1281 1282 if (netif_running(netdev)) { 1283 netdev->netdev_ops->ndo_stop(netdev); 1284 stopped = 1; 1285 } 1286 1287 /* Change RX/TX DESCS count */ 1288 if (tx_count != tx_count_old) 1289 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1290 tx_count); 1291 if (rx_count != rx_count_old) 1292 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1293 rx_count); 1294 1295 if (lio_reset_queues(netdev, oct->num_iqs)) 1296 goto err_lio_reset_queues; 1297 1298 if (stopped) 1299 netdev->netdev_ops->ndo_open(netdev); 1300 1301 ifstate_reset(lio, LIO_IFSTATE_RESETTING); 1302 1303 return 0; 1304 1305 err_lio_reset_queues: 1306 if (tx_count != tx_count_old) 1307 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1308 tx_count_old); 1309 if (rx_count != rx_count_old) 1310 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1311 rx_count_old); 1312 return -EINVAL; 
}

/* ethtool .get_msglevel: report the driver's current debug-message mask. */
static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

/* ethtool .set_msglevel (PF): store the new mask.  A change of the
 * NETIF_MSG_HW bit additionally toggles firmware verbose mode through a
 * control command.
 */
static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	/* Only issue the command when the NETIF_MSG_HW bit actually flips */
	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE, 0);
	}

	lio->msg_enable = msglvl;
}

/* ethtool .set_msglevel (VF): no firmware verbose control on a VF,
 * just record the mask.
 */
static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	lio->msg_enable = msglvl;
}

static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers. Just report pause frame support.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	pause->autoneg = 0;

	/* Report the last values applied by lio_set_pauseparam() */
	pause->tx_pause = oct->tx_pause;
	pause->rx_pause = oct->rx_pause;
}

static int
lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct oct_link_info *linfo = &lio->linfo;

	int ret = 0;

	/* Flow-control configuration is only implemented for the CN23XX PF */
	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
		return -EINVAL;

	if (linfo->link.s.duplex == 0) {
		/*no flow control for half duplex*/
		if (pause->rx_pause || pause->tx_pause)
			return -EINVAL;
	}

	/*do not support autoneg of link flow control*/
	if (pause->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	/* Build the SET_FLOW_CTL control command for firmware */
	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	if (pause->rx_pause) {
		/*enable rx pause*/
		nctrl.ncmd.s.param1 = 1;
	} else {
		/*disable rx pause*/
		nctrl.ncmd.s.param1 = 0;
	}

	if (pause->tx_pause) {
		/*enable tx pause*/
		nctrl.ncmd.s.param2 = 1;
	} else {
		/*disable tx pause*/
		nctrl.ncmd.s.param2 = 0;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Failed to set pause parameter, ret=%d\n", ret);
		return -EINVAL;
	}

	/* Cache the applied settings for lio_get_pauseparam() */
	oct->rx_pause = pause->rx_pause;
	oct->tx_pause = pause->tx_pause;

	return 0;
}

/* ethtool .get_ethtool_stats (PF): fill @data in the exact order of
 * oct_stats_strings, then per-IQ and per-OQ stats.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct rtnl_link_stats64 lstats;
	int i = 0, j;

	/* Stats are transiently invalid while the queues are being reset */
	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
1439 /*sum of oct->instr_queue[iq_no]->stats.tx_done */ 1440 data[i++] = lstats.tx_packets; 1441 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1442 data[i++] = lstats.rx_bytes; 1443 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1444 data[i++] = lstats.tx_bytes; 1445 data[i++] = lstats.rx_errors + 1446 oct_dev->link_stats.fromwire.fcs_err + 1447 oct_dev->link_stats.fromwire.jabber_err + 1448 oct_dev->link_stats.fromwire.l2_err + 1449 oct_dev->link_stats.fromwire.frame_err; 1450 data[i++] = lstats.tx_errors; 1451 /*sum of oct->droq[oq_no]->stats->rx_dropped + 1452 *oct->droq[oq_no]->stats->dropped_nodispatch + 1453 *oct->droq[oq_no]->stats->dropped_toomany + 1454 *oct->droq[oq_no]->stats->dropped_nomem 1455 */ 1456 data[i++] = lstats.rx_dropped + 1457 oct_dev->link_stats.fromwire.fifo_err + 1458 oct_dev->link_stats.fromwire.dmac_drop + 1459 oct_dev->link_stats.fromwire.red_drops + 1460 oct_dev->link_stats.fromwire.fw_err_pko + 1461 oct_dev->link_stats.fromwire.fw_err_link + 1462 oct_dev->link_stats.fromwire.fw_err_drop; 1463 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1464 data[i++] = lstats.tx_dropped + 1465 oct_dev->link_stats.fromhost.max_collision_fail + 1466 oct_dev->link_stats.fromhost.max_deferral_fail + 1467 oct_dev->link_stats.fromhost.total_collisions + 1468 oct_dev->link_stats.fromhost.fw_err_pko + 1469 oct_dev->link_stats.fromhost.fw_err_link + 1470 oct_dev->link_stats.fromhost.fw_err_drop + 1471 oct_dev->link_stats.fromhost.fw_err_pki; 1472 1473 /* firmware tx stats */ 1474 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 
1475 *fromhost.fw_total_sent 1476 */ 1477 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); 1478 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ 1479 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); 1480 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ 1481 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); 1482 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */ 1483 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki); 1484 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ 1485 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); 1486 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1487 *fw_err_drop 1488 */ 1489 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); 1490 1491 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ 1492 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); 1493 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1494 *fw_tso_fwd 1495 */ 1496 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); 1497 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1498 *fw_err_tso 1499 */ 1500 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); 1501 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 
1502 *fw_tx_vxlan 1503 */ 1504 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); 1505 1506 /* Multicast packets sent by this port */ 1507 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; 1508 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; 1509 1510 /* mac tx statistics */ 1511 /*CVMX_BGXX_CMRX_TX_STAT5 */ 1512 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); 1513 /*CVMX_BGXX_CMRX_TX_STAT4 */ 1514 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); 1515 /*CVMX_BGXX_CMRX_TX_STAT15 */ 1516 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); 1517 /*CVMX_BGXX_CMRX_TX_STAT14 */ 1518 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); 1519 /*CVMX_BGXX_CMRX_TX_STAT17 */ 1520 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); 1521 /*CVMX_BGXX_CMRX_TX_STAT0 */ 1522 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); 1523 /*CVMX_BGXX_CMRX_TX_STAT3 */ 1524 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); 1525 /*CVMX_BGXX_CMRX_TX_STAT2 */ 1526 data[i++] = 1527 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); 1528 /*CVMX_BGXX_CMRX_TX_STAT0 */ 1529 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); 1530 /*CVMX_BGXX_CMRX_TX_STAT1 */ 1531 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); 1532 /*CVMX_BGXX_CMRX_TX_STAT16 */ 1533 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); 1534 /*CVMX_BGXX_CMRX_TX_STAT6 */ 1535 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); 1536 1537 /* RX firmware stats */ 1538 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1539 *fw_total_rcvd 1540 */ 1541 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); 1542 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
1543 *fw_total_fwd 1544 */ 1545 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); 1546 /* Multicast packets received on this port */ 1547 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; 1548 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; 1549 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ 1550 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); 1551 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ 1552 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); 1553 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ 1554 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); 1555 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1556 *fw_err_pko 1557 */ 1558 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); 1559 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ 1560 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); 1561 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1562 *fromwire.fw_err_drop 1563 */ 1564 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); 1565 1566 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1567 *fromwire.fw_rx_vxlan 1568 */ 1569 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); 1570 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1571 *fromwire.fw_rx_vxlan_err 1572 */ 1573 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); 1574 1575 /* LRO */ 1576 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1577 *fw_lro_pkts 1578 */ 1579 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); 1580 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
1581 *fw_lro_octs 1582 */ 1583 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); 1584 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ 1585 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); 1586 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 1587 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); 1588 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1589 *fw_lro_aborts_port 1590 */ 1591 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); 1592 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1593 *fw_lro_aborts_seq 1594 */ 1595 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); 1596 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1597 *fw_lro_aborts_tsval 1598 */ 1599 data[i++] = 1600 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); 1601 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1602 *fw_lro_aborts_timer 1603 */ 1604 /* intrmod: packet forward rate */ 1605 data[i++] = 1606 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); 1607 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 1608 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); 1609 1610 /* mac: link-level stats */ 1611 /*CVMX_BGXX_CMRX_RX_STAT0 */ 1612 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); 1613 /*CVMX_BGXX_CMRX_RX_STAT1 */ 1614 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); 1615 /*CVMX_PKI_STATX_STAT5 */ 1616 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); 1617 /*CVMX_PKI_STATX_STAT5 */ 1618 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); 1619 /*wqe->word2.err_code or wqe->word2.err_level */ 1620 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); 1621 /*CVMX_BGXX_CMRX_RX_STAT2 */ 1622 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); 1623 /*CVMX_BGXX_CMRX_RX_STAT6 */ 1624 
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); 1625 /*CVMX_BGXX_CMRX_RX_STAT4 */ 1626 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); 1627 /*wqe->word2.err_code or wqe->word2.err_level */ 1628 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); 1629 /*lio->link_changes*/ 1630 data[i++] = CVM_CAST64(lio->link_changes); 1631 1632 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { 1633 if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) 1634 continue; 1635 /*packets to network port*/ 1636 /*# of packets tx to network */ 1637 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 1638 /*# of bytes tx to network */ 1639 data[i++] = 1640 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); 1641 /*# of packets dropped */ 1642 data[i++] = 1643 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); 1644 /*# of tx fails due to queue full */ 1645 data[i++] = 1646 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); 1647 /*XXX gather entries sent */ 1648 data[i++] = 1649 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); 1650 1651 /*instruction to firmware: data and control */ 1652 /*# of instructions to the queue */ 1653 data[i++] = 1654 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); 1655 /*# of instructions processed */ 1656 data[i++] = CVM_CAST64( 1657 oct_dev->instr_queue[j]->stats.instr_processed); 1658 /*# of instructions could not be processed */ 1659 data[i++] = CVM_CAST64( 1660 oct_dev->instr_queue[j]->stats.instr_dropped); 1661 /*bytes sent through the queue */ 1662 data[i++] = 1663 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); 1664 1665 /*tso request*/ 1666 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 1667 /*vxlan request*/ 1668 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1669 /*txq restart*/ 1670 data[i++] = 1671 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); 1672 } 1673 1674 /* RX */ 1675 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); 
j++) { 1676 if (!(oct_dev->io_qmask.oq & BIT_ULL(j))) 1677 continue; 1678 1679 /*packets send to TCP/IP network stack */ 1680 /*# of packets to network stack */ 1681 data[i++] = 1682 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); 1683 /*# of bytes to network stack */ 1684 data[i++] = 1685 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); 1686 /*# of packets dropped */ 1687 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1688 oct_dev->droq[j]->stats.dropped_toomany + 1689 oct_dev->droq[j]->stats.rx_dropped); 1690 data[i++] = 1691 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1692 data[i++] = 1693 CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1694 data[i++] = 1695 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1696 1697 /*control and data path*/ 1698 data[i++] = 1699 CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1700 data[i++] = 1701 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1702 data[i++] = 1703 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1704 1705 data[i++] = 1706 CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1707 data[i++] = 1708 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1709 } 1710 } 1711 1712 static void lio_vf_get_ethtool_stats(struct net_device *netdev, 1713 struct ethtool_stats *stats 1714 __attribute__((unused)), 1715 u64 *data) 1716 { 1717 struct rtnl_link_stats64 lstats; 1718 struct lio *lio = GET_LIO(netdev); 1719 struct octeon_device *oct_dev = lio->oct_dev; 1720 int i = 0, j, vj; 1721 1722 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1723 return; 1724 1725 netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); 1726 /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1727 data[i++] = lstats.rx_packets; 1728 /* sum of oct->instr_queue[iq_no]->stats.tx_done */ 1729 data[i++] = lstats.tx_packets; 1730 /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1731 data[i++] = lstats.rx_bytes; 1732 /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1733 data[i++] = 
lstats.tx_bytes; 1734 data[i++] = lstats.rx_errors; 1735 data[i++] = lstats.tx_errors; 1736 /* sum of oct->droq[oq_no]->stats->rx_dropped + 1737 * oct->droq[oq_no]->stats->dropped_nodispatch + 1738 * oct->droq[oq_no]->stats->dropped_toomany + 1739 * oct->droq[oq_no]->stats->dropped_nomem 1740 */ 1741 data[i++] = lstats.rx_dropped; 1742 /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1743 data[i++] = lstats.tx_dropped + 1744 oct_dev->link_stats.fromhost.fw_err_drop; 1745 1746 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; 1747 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; 1748 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; 1749 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; 1750 1751 /* lio->link_changes */ 1752 data[i++] = CVM_CAST64(lio->link_changes); 1753 1754 for (vj = 0; vj < oct_dev->num_iqs; vj++) { 1755 j = lio->linfo.txpciq[vj].s.q_no; 1756 1757 /* packets to network port */ 1758 /* # of packets tx to network */ 1759 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 1760 /* # of bytes tx to network */ 1761 data[i++] = CVM_CAST64( 1762 oct_dev->instr_queue[j]->stats.tx_tot_bytes); 1763 /* # of packets dropped */ 1764 data[i++] = CVM_CAST64( 1765 oct_dev->instr_queue[j]->stats.tx_dropped); 1766 /* # of tx fails due to queue full */ 1767 data[i++] = CVM_CAST64( 1768 oct_dev->instr_queue[j]->stats.tx_iq_busy); 1769 /* XXX gather entries sent */ 1770 data[i++] = CVM_CAST64( 1771 oct_dev->instr_queue[j]->stats.sgentry_sent); 1772 1773 /* instruction to firmware: data and control */ 1774 /* # of instructions to the queue */ 1775 data[i++] = CVM_CAST64( 1776 oct_dev->instr_queue[j]->stats.instr_posted); 1777 /* # of instructions processed */ 1778 data[i++] = 1779 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed); 1780 /* # of instructions could not be processed */ 1781 data[i++] = 1782 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped); 1783 /* bytes sent through the queue */ 
1784 data[i++] = CVM_CAST64( 1785 oct_dev->instr_queue[j]->stats.bytes_sent); 1786 /* tso request */ 1787 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 1788 /* vxlan request */ 1789 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1790 /* txq restart */ 1791 data[i++] = CVM_CAST64( 1792 oct_dev->instr_queue[j]->stats.tx_restart); 1793 } 1794 1795 /* RX */ 1796 for (vj = 0; vj < oct_dev->num_oqs; vj++) { 1797 j = lio->linfo.rxpciq[vj].s.q_no; 1798 1799 /* packets send to TCP/IP network stack */ 1800 /* # of packets to network stack */ 1801 data[i++] = CVM_CAST64( 1802 oct_dev->droq[j]->stats.rx_pkts_received); 1803 /* # of bytes to network stack */ 1804 data[i++] = CVM_CAST64( 1805 oct_dev->droq[j]->stats.rx_bytes_received); 1806 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1807 oct_dev->droq[j]->stats.dropped_toomany + 1808 oct_dev->droq[j]->stats.rx_dropped); 1809 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1810 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1811 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1812 1813 /* control and data path */ 1814 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1815 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1816 data[i++] = 1817 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1818 1819 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1820 data[i++] = 1821 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1822 } 1823 } 1824 1825 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data) 1826 { 1827 struct octeon_device *oct_dev = lio->oct_dev; 1828 int i; 1829 1830 switch (oct_dev->chip_id) { 1831 case OCTEON_CN23XX_PF_VID: 1832 case OCTEON_CN23XX_VF_VID: 1833 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) { 1834 sprintf(data, "%s", oct_priv_flags_strings[i]); 1835 data += ETH_GSTRING_LEN; 1836 } 1837 break; 1838 case OCTEON_CN68XX: 1839 case 
OCTEON_CN66XX:
		/* CN6xxx chips expose no private flags */
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		break;
	}
}

/* ethtool .get_strings (PF): emit the stat names in the same order that
 * lio_get_ethtool_stats() fills values — global stats, then "tx-<q>-*"
 * per instruction queue, then "rx-<q>-*" per output queue.
 */
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			/* only queues present in the IQ mask are reported */
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

/* ethtool .get_strings (VF): same layout as the PF variant but using the
 * (smaller) VF stats string table.
 */
static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
			       u8 *data)
{
	int num_iq_stats, num_oq_stats, i, j;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_vf_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

/* Number of private-flag strings; only CN23XX chips expose any. */
static int lio_get_priv_flags_ss_count(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return ARRAY_SIZE(oct_priv_flags_strings);
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		return -EOPNOTSUPP;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EOPNOTSUPP;
	}
}

/* ethtool .get_sset_count (PF): total string count must match what
 * lio_get_strings() emits for the same stringset.
 */
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool .get_sset_count (VF): as above, with the VF stats table. */
static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct
octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_vf_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

/* get interrupt moderation parameters
 *
 * Queries firmware with an OPCODE_NIC_INTRMOD_PARAMS soft command and
 * copies the returned configuration into @intr_cfg.
 * Returns 0 on success, -ENOMEM/-EINVAL/-ENODEV on failure.
 */
static int octnet_get_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_resp *resp;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_intrmod_resp), 0);

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_intrmod_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_intrmod_resp));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		/* sc was never queued: we still own it and must free it */
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		/* NOTE(review): no free/caller_is_done here — presumably the
		 * timeout path hands sc ownership to the response manager;
		 * verify against wait_for_sc_completion_timeout().
		 */
		return -ENODEV;

	if (resp->status) {
		dev_err(&oct_dev->pci_dev->dev,
			"Get interrupt moderation parameters failed\n");
		WRITE_ONCE(sc->caller_is_done, true);
		return -ENODEV;
	}

	/* Byte-swap the 8-byte words of the response before copying out */
	octeon_swap_8B_data((u64 *)&resp->intrmod,
			    (sizeof(struct oct_intrmod_cfg)) / 8);
	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
	/* Signal the response manager that sc may be reclaimed */
	WRITE_ONCE(sc->caller_is_done, true);

	return 0;
}

/* Configure interrupt moderation parameters
 *
 * Sends @intr_cfg to firmware via an OPCODE_NIC_INTRMOD_CFG soft command.
 */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  16, 0);

	if (!sc)
		return -ENOMEM;

	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	/* Copy the config into the DMA buffer and byte-swap its words */
	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		/* sc was never queued: we still own it and must free it */
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
2089 */ 2090 retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); 2091 if (retval) 2092 return retval; 2093 2094 retval = sc->sc_status; 2095 if (retval == 0) { 2096 dev_info(&oct_dev->pci_dev->dev, 2097 "Rx-Adaptive Interrupt moderation %s\n", 2098 (intr_cfg->rx_enable) ? 2099 "enabled" : "disabled"); 2100 WRITE_ONCE(sc->caller_is_done, true); 2101 return 0; 2102 } 2103 2104 dev_err(&oct_dev->pci_dev->dev, 2105 "intrmod config failed. Status: %x\n", retval); 2106 WRITE_ONCE(sc->caller_is_done, true); 2107 return -ENODEV; 2108 } 2109 2110 static int lio_get_intr_coalesce(struct net_device *netdev, 2111 struct ethtool_coalesce *intr_coal) 2112 { 2113 struct lio *lio = GET_LIO(netdev); 2114 struct octeon_device *oct = lio->oct_dev; 2115 struct octeon_instr_queue *iq; 2116 struct oct_intrmod_cfg intrmod_cfg; 2117 2118 if (octnet_get_intrmod_cfg(lio, &intrmod_cfg)) 2119 return -ENODEV; 2120 2121 switch (oct->chip_id) { 2122 case OCTEON_CN23XX_PF_VID: 2123 case OCTEON_CN23XX_VF_VID: { 2124 if (!intrmod_cfg.rx_enable) { 2125 intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs; 2126 intr_coal->rx_max_coalesced_frames = 2127 oct->rx_max_coalesced_frames; 2128 } 2129 if (!intrmod_cfg.tx_enable) 2130 intr_coal->tx_max_coalesced_frames = 2131 oct->tx_max_coalesced_frames; 2132 break; 2133 } 2134 case OCTEON_CN68XX: 2135 case OCTEON_CN66XX: { 2136 struct octeon_cn6xxx *cn6xxx = 2137 (struct octeon_cn6xxx *)oct->chip; 2138 2139 if (!intrmod_cfg.rx_enable) { 2140 intr_coal->rx_coalesce_usecs = 2141 CFG_GET_OQ_INTR_TIME(cn6xxx->conf); 2142 intr_coal->rx_max_coalesced_frames = 2143 CFG_GET_OQ_INTR_PKT(cn6xxx->conf); 2144 } 2145 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; 2146 intr_coal->tx_max_coalesced_frames = iq->fill_threshold; 2147 break; 2148 } 2149 default: 2150 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 2151 return -EINVAL; 2152 } 2153 if (intrmod_cfg.rx_enable) { 2154 intr_coal->use_adaptive_rx_coalesce = 2155 intrmod_cfg.rx_enable; 2156 
intr_coal->rate_sample_interval = 2157 intrmod_cfg.check_intrvl; 2158 intr_coal->pkt_rate_high = 2159 intrmod_cfg.maxpkt_ratethr; 2160 intr_coal->pkt_rate_low = 2161 intrmod_cfg.minpkt_ratethr; 2162 intr_coal->rx_max_coalesced_frames_high = 2163 intrmod_cfg.rx_maxcnt_trigger; 2164 intr_coal->rx_coalesce_usecs_high = 2165 intrmod_cfg.rx_maxtmr_trigger; 2166 intr_coal->rx_coalesce_usecs_low = 2167 intrmod_cfg.rx_mintmr_trigger; 2168 intr_coal->rx_max_coalesced_frames_low = 2169 intrmod_cfg.rx_mincnt_trigger; 2170 } 2171 if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && 2172 (intrmod_cfg.tx_enable)) { 2173 intr_coal->use_adaptive_tx_coalesce = 2174 intrmod_cfg.tx_enable; 2175 intr_coal->tx_max_coalesced_frames_high = 2176 intrmod_cfg.tx_maxcnt_trigger; 2177 intr_coal->tx_max_coalesced_frames_low = 2178 intrmod_cfg.tx_mincnt_trigger; 2179 } 2180 return 0; 2181 } 2182 2183 /* Enable/Disable auto interrupt Moderation */ 2184 static int oct_cfg_adaptive_intr(struct lio *lio, 2185 struct oct_intrmod_cfg *intrmod_cfg, 2186 struct ethtool_coalesce *intr_coal) 2187 { 2188 int ret = 0; 2189 2190 if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) { 2191 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; 2192 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; 2193 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; 2194 } 2195 if (intrmod_cfg->rx_enable) { 2196 intrmod_cfg->rx_maxcnt_trigger = 2197 intr_coal->rx_max_coalesced_frames_high; 2198 intrmod_cfg->rx_maxtmr_trigger = 2199 intr_coal->rx_coalesce_usecs_high; 2200 intrmod_cfg->rx_mintmr_trigger = 2201 intr_coal->rx_coalesce_usecs_low; 2202 intrmod_cfg->rx_mincnt_trigger = 2203 intr_coal->rx_max_coalesced_frames_low; 2204 } 2205 if (intrmod_cfg->tx_enable) { 2206 intrmod_cfg->tx_maxcnt_trigger = 2207 intr_coal->tx_max_coalesced_frames_high; 2208 intrmod_cfg->tx_mincnt_trigger = 2209 intr_coal->tx_max_coalesced_frames_low; 2210 } 2211 2212 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); 2213 2214 
return ret; 2215 } 2216 2217 static int 2218 oct_cfg_rx_intrcnt(struct lio *lio, 2219 struct oct_intrmod_cfg *intrmod, 2220 struct ethtool_coalesce *intr_coal) 2221 { 2222 struct octeon_device *oct = lio->oct_dev; 2223 u32 rx_max_coalesced_frames; 2224 2225 /* Config Cnt based interrupt values */ 2226 switch (oct->chip_id) { 2227 case OCTEON_CN68XX: 2228 case OCTEON_CN66XX: { 2229 struct octeon_cn6xxx *cn6xxx = 2230 (struct octeon_cn6xxx *)oct->chip; 2231 2232 if (!intr_coal->rx_max_coalesced_frames) 2233 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; 2234 else 2235 rx_max_coalesced_frames = 2236 intr_coal->rx_max_coalesced_frames; 2237 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, 2238 rx_max_coalesced_frames); 2239 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); 2240 break; 2241 } 2242 case OCTEON_CN23XX_PF_VID: { 2243 int q_no; 2244 2245 if (!intr_coal->rx_max_coalesced_frames) 2246 rx_max_coalesced_frames = intrmod->rx_frames; 2247 else 2248 rx_max_coalesced_frames = 2249 intr_coal->rx_max_coalesced_frames; 2250 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2251 q_no += oct->sriov_info.pf_srn; 2252 octeon_write_csr64( 2253 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 2254 (octeon_read_csr64( 2255 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2256 (0x3fffff00000000UL)) | 2257 (rx_max_coalesced_frames - 1)); 2258 /*consider setting resend bit*/ 2259 } 2260 intrmod->rx_frames = rx_max_coalesced_frames; 2261 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2262 break; 2263 } 2264 case OCTEON_CN23XX_VF_VID: { 2265 int q_no; 2266 2267 if (!intr_coal->rx_max_coalesced_frames) 2268 rx_max_coalesced_frames = intrmod->rx_frames; 2269 else 2270 rx_max_coalesced_frames = 2271 intr_coal->rx_max_coalesced_frames; 2272 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2273 octeon_write_csr64( 2274 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 2275 (octeon_read_csr64( 2276 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2277 (0x3fffff00000000UL)) | 2278 
(rx_max_coalesced_frames - 1)); 2279 /*consider writing to resend bit here*/ 2280 } 2281 intrmod->rx_frames = rx_max_coalesced_frames; 2282 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2283 break; 2284 } 2285 default: 2286 return -EINVAL; 2287 } 2288 return 0; 2289 } 2290 2291 static int oct_cfg_rx_intrtime(struct lio *lio, 2292 struct oct_intrmod_cfg *intrmod, 2293 struct ethtool_coalesce *intr_coal) 2294 { 2295 struct octeon_device *oct = lio->oct_dev; 2296 u32 time_threshold, rx_coalesce_usecs; 2297 2298 /* Config Time based interrupt values */ 2299 switch (oct->chip_id) { 2300 case OCTEON_CN68XX: 2301 case OCTEON_CN66XX: { 2302 struct octeon_cn6xxx *cn6xxx = 2303 (struct octeon_cn6xxx *)oct->chip; 2304 if (!intr_coal->rx_coalesce_usecs) 2305 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; 2306 else 2307 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2308 2309 time_threshold = lio_cn6xxx_get_oq_ticks(oct, 2310 rx_coalesce_usecs); 2311 octeon_write_csr(oct, 2312 CN6XXX_SLI_OQ_INT_LEVEL_TIME, 2313 time_threshold); 2314 2315 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); 2316 break; 2317 } 2318 case OCTEON_CN23XX_PF_VID: { 2319 u64 time_threshold; 2320 int q_no; 2321 2322 if (!intr_coal->rx_coalesce_usecs) 2323 rx_coalesce_usecs = intrmod->rx_usecs; 2324 else 2325 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2326 time_threshold = 2327 cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 2328 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2329 q_no += oct->sriov_info.pf_srn; 2330 octeon_write_csr64(oct, 2331 CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 2332 (intrmod->rx_frames | 2333 ((u64)time_threshold << 32))); 2334 /*consider writing to resend bit here*/ 2335 } 2336 intrmod->rx_usecs = rx_coalesce_usecs; 2337 oct->rx_coalesce_usecs = rx_coalesce_usecs; 2338 break; 2339 } 2340 case OCTEON_CN23XX_VF_VID: { 2341 u64 time_threshold; 2342 int q_no; 2343 2344 if (!intr_coal->rx_coalesce_usecs) 2345 rx_coalesce_usecs = intrmod->rx_usecs; 2346 else 2347 
rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2348 2349 time_threshold = 2350 cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 2351 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2352 octeon_write_csr64( 2353 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 2354 (intrmod->rx_frames | 2355 ((u64)time_threshold << 32))); 2356 /*consider setting resend bit*/ 2357 } 2358 intrmod->rx_usecs = rx_coalesce_usecs; 2359 oct->rx_coalesce_usecs = rx_coalesce_usecs; 2360 break; 2361 } 2362 default: 2363 return -EINVAL; 2364 } 2365 2366 return 0; 2367 } 2368 2369 static int 2370 oct_cfg_tx_intrcnt(struct lio *lio, 2371 struct oct_intrmod_cfg *intrmod, 2372 struct ethtool_coalesce *intr_coal) 2373 { 2374 struct octeon_device *oct = lio->oct_dev; 2375 u32 iq_intr_pkt; 2376 void __iomem *inst_cnt_reg; 2377 u64 val; 2378 2379 /* Config Cnt based interrupt values */ 2380 switch (oct->chip_id) { 2381 case OCTEON_CN68XX: 2382 case OCTEON_CN66XX: 2383 break; 2384 case OCTEON_CN23XX_VF_VID: 2385 case OCTEON_CN23XX_PF_VID: { 2386 int q_no; 2387 2388 if (!intr_coal->tx_max_coalesced_frames) 2389 iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD & 2390 CN23XX_PKT_IN_DONE_WMARK_MASK; 2391 else 2392 iq_intr_pkt = intr_coal->tx_max_coalesced_frames & 2393 CN23XX_PKT_IN_DONE_WMARK_MASK; 2394 for (q_no = 0; q_no < oct->num_iqs; q_no++) { 2395 inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; 2396 val = readq(inst_cnt_reg); 2397 /*clear wmark and count.dont want to write count back*/ 2398 val = (val & 0xFFFF000000000000ULL) | 2399 ((u64)(iq_intr_pkt - 1) 2400 << CN23XX_PKT_IN_DONE_WMARK_BIT_POS); 2401 writeq(val, inst_cnt_reg); 2402 /*consider setting resend bit*/ 2403 } 2404 intrmod->tx_frames = iq_intr_pkt; 2405 oct->tx_max_coalesced_frames = iq_intr_pkt; 2406 break; 2407 } 2408 default: 2409 return -EINVAL; 2410 } 2411 return 0; 2412 } 2413 2414 static int lio_set_intr_coalesce(struct net_device *netdev, 2415 struct ethtool_coalesce *intr_coal) 2416 { 2417 struct lio *lio = GET_LIO(netdev); 
2418 int ret; 2419 struct octeon_device *oct = lio->oct_dev; 2420 struct oct_intrmod_cfg intrmod = {0}; 2421 u32 j, q_no; 2422 int db_max, db_min; 2423 2424 switch (oct->chip_id) { 2425 case OCTEON_CN68XX: 2426 case OCTEON_CN66XX: 2427 db_min = CN6XXX_DB_MIN; 2428 db_max = CN6XXX_DB_MAX; 2429 if ((intr_coal->tx_max_coalesced_frames >= db_min) && 2430 (intr_coal->tx_max_coalesced_frames <= db_max)) { 2431 for (j = 0; j < lio->linfo.num_txpciq; j++) { 2432 q_no = lio->linfo.txpciq[j].s.q_no; 2433 oct->instr_queue[q_no]->fill_threshold = 2434 intr_coal->tx_max_coalesced_frames; 2435 } 2436 } else { 2437 dev_err(&oct->pci_dev->dev, 2438 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", 2439 intr_coal->tx_max_coalesced_frames, 2440 db_min, db_max); 2441 return -EINVAL; 2442 } 2443 break; 2444 case OCTEON_CN23XX_PF_VID: 2445 case OCTEON_CN23XX_VF_VID: 2446 break; 2447 default: 2448 return -EINVAL; 2449 } 2450 2451 intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; 2452 intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 
1 : 0; 2453 intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2454 intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2455 intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2456 2457 ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal); 2458 2459 if (!intr_coal->use_adaptive_rx_coalesce) { 2460 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal); 2461 if (ret) 2462 goto ret_intrmod; 2463 2464 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal); 2465 if (ret) 2466 goto ret_intrmod; 2467 } else { 2468 oct->rx_coalesce_usecs = 2469 CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2470 oct->rx_max_coalesced_frames = 2471 CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2472 } 2473 2474 if (!intr_coal->use_adaptive_tx_coalesce) { 2475 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal); 2476 if (ret) 2477 goto ret_intrmod; 2478 } else { 2479 oct->tx_max_coalesced_frames = 2480 CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2481 } 2482 2483 return 0; 2484 ret_intrmod: 2485 return ret; 2486 } 2487 2488 static int lio_get_ts_info(struct net_device *netdev, 2489 struct ethtool_ts_info *info) 2490 { 2491 struct lio *lio = GET_LIO(netdev); 2492 2493 info->so_timestamping = 2494 #ifdef PTP_HARDWARE_TIMESTAMPING 2495 SOF_TIMESTAMPING_TX_HARDWARE | 2496 SOF_TIMESTAMPING_RX_HARDWARE | 2497 SOF_TIMESTAMPING_RAW_HARDWARE | 2498 SOF_TIMESTAMPING_TX_SOFTWARE | 2499 #endif 2500 SOF_TIMESTAMPING_RX_SOFTWARE | 2501 SOF_TIMESTAMPING_SOFTWARE; 2502 2503 if (lio->ptp_clock) 2504 info->phc_index = ptp_clock_index(lio->ptp_clock); 2505 else 2506 info->phc_index = -1; 2507 2508 #ifdef PTP_HARDWARE_TIMESTAMPING 2509 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 2510 2511 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2512 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 2513 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 2514 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 2515 #endif 2516 2517 return 0; 2518 } 2519 2520 /* Return register dump len. 
*/ 2521 static int lio_get_regs_len(struct net_device *dev) 2522 { 2523 struct lio *lio = GET_LIO(dev); 2524 struct octeon_device *oct = lio->oct_dev; 2525 2526 switch (oct->chip_id) { 2527 case OCTEON_CN23XX_PF_VID: 2528 return OCT_ETHTOOL_REGDUMP_LEN_23XX; 2529 case OCTEON_CN23XX_VF_VID: 2530 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF; 2531 default: 2532 return OCT_ETHTOOL_REGDUMP_LEN; 2533 } 2534 } 2535 2536 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct) 2537 { 2538 u32 reg; 2539 u8 pf_num = oct->pf_num; 2540 int len = 0; 2541 int i; 2542 2543 /* PCI Window Registers */ 2544 2545 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2546 2547 /*0x29030 or 0x29040*/ 2548 reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); 2549 len += sprintf(s + len, 2550 "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", 2551 reg, oct->pcie_port, oct->pf_num, 2552 (u64)octeon_read_csr64(oct, reg)); 2553 2554 /*0x27080 or 0x27090*/ 2555 reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); 2556 len += 2557 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", 2558 reg, oct->pcie_port, oct->pf_num, 2559 (u64)octeon_read_csr64(oct, reg)); 2560 2561 /*0x27000 or 0x27010*/ 2562 reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); 2563 len += 2564 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", 2565 reg, oct->pcie_port, oct->pf_num, 2566 (u64)octeon_read_csr64(oct, reg)); 2567 2568 /*0x29120*/ 2569 reg = 0x29120; 2570 len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, 2571 (u64)octeon_read_csr64(oct, reg)); 2572 2573 /*0x27300*/ 2574 reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2575 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2576 len += sprintf( 2577 s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, 2578 oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg)); 2579 2580 /*0x27200*/ 2581 reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2582 
(oct->pf_num) * CN23XX_PF_INT_OFFSET; 2583 len += sprintf(s + len, 2584 "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", 2585 reg, oct->pcie_port, oct->pf_num, 2586 (u64)octeon_read_csr64(oct, reg)); 2587 2588 /*29130*/ 2589 reg = CN23XX_SLI_PKT_CNT_INT; 2590 len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, 2591 (u64)octeon_read_csr64(oct, reg)); 2592 2593 /*0x29140*/ 2594 reg = CN23XX_SLI_PKT_TIME_INT; 2595 len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, 2596 (u64)octeon_read_csr64(oct, reg)); 2597 2598 /*0x29160*/ 2599 reg = 0x29160; 2600 len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg, 2601 (u64)octeon_read_csr64(oct, reg)); 2602 2603 /*0x29180*/ 2604 reg = CN23XX_SLI_OQ_WMARK; 2605 len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", 2606 reg, (u64)octeon_read_csr64(oct, reg)); 2607 2608 /*0x291E0*/ 2609 reg = CN23XX_SLI_PKT_IOQ_RING_RST; 2610 len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, 2611 (u64)octeon_read_csr64(oct, reg)); 2612 2613 /*0x29210*/ 2614 reg = CN23XX_SLI_GBL_CONTROL; 2615 len += sprintf(s + len, 2616 "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, 2617 (u64)octeon_read_csr64(oct, reg)); 2618 2619 /*0x29220*/ 2620 reg = 0x29220; 2621 len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", 2622 reg, (u64)octeon_read_csr64(oct, reg)); 2623 2624 /*PF only*/ 2625 if (pf_num == 0) { 2626 /*0x29260*/ 2627 reg = CN23XX_SLI_OUT_BP_EN_W1S; 2628 len += sprintf(s + len, 2629 "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n", 2630 reg, (u64)octeon_read_csr64(oct, reg)); 2631 } else if (pf_num == 1) { 2632 /*0x29270*/ 2633 reg = CN23XX_SLI_OUT_BP_EN2_W1S; 2634 len += sprintf(s + len, 2635 "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", 2636 reg, (u64)octeon_read_csr64(oct, reg)); 2637 } 2638 2639 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2640 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); 2641 len += 2642 sprintf(s + len, "\n[%08x] 
(SLI_PKT%d_OUT_SIZE): %016llx\n", 2643 reg, i, (u64)octeon_read_csr64(oct, reg)); 2644 } 2645 2646 /*0x10040*/ 2647 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2648 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2649 len += sprintf(s + len, 2650 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2651 reg, i, (u64)octeon_read_csr64(oct, reg)); 2652 } 2653 2654 /*0x10080*/ 2655 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2656 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i); 2657 len += sprintf(s + len, 2658 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2659 reg, i, (u64)octeon_read_csr64(oct, reg)); 2660 } 2661 2662 /*0x10090*/ 2663 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2664 reg = CN23XX_SLI_OQ_SIZE(i); 2665 len += sprintf( 2666 s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2667 reg, i, (u64)octeon_read_csr64(oct, reg)); 2668 } 2669 2670 /*0x10050*/ 2671 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2672 reg = CN23XX_SLI_OQ_PKT_CONTROL(i); 2673 len += sprintf( 2674 s + len, 2675 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2676 reg, i, (u64)octeon_read_csr64(oct, reg)); 2677 } 2678 2679 /*0x10070*/ 2680 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2681 reg = CN23XX_SLI_OQ_BASE_ADDR64(i); 2682 len += sprintf(s + len, 2683 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2684 reg, i, (u64)octeon_read_csr64(oct, reg)); 2685 } 2686 2687 /*0x100a0*/ 2688 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2689 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i); 2690 len += sprintf(s + len, 2691 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2692 reg, i, (u64)octeon_read_csr64(oct, reg)); 2693 } 2694 2695 /*0x100b0*/ 2696 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2697 reg = CN23XX_SLI_OQ_PKTS_SENT(i); 2698 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2699 reg, i, (u64)octeon_read_csr64(oct, reg)); 2700 } 2701 2702 /*0x100c0*/ 2703 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2704 reg = 0x100c0 + i * CN23XX_OQ_OFFSET; 2705 len += 
sprintf(s + len, 2706 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2707 reg, i, (u64)octeon_read_csr64(oct, reg)); 2708 2709 /*0x10000*/ 2710 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2711 reg = CN23XX_SLI_IQ_PKT_CONTROL64(i); 2712 len += sprintf( 2713 s + len, 2714 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2715 reg, i, (u64)octeon_read_csr64(oct, reg)); 2716 } 2717 2718 /*0x10010*/ 2719 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2720 reg = CN23XX_SLI_IQ_BASE_ADDR64(i); 2721 len += sprintf( 2722 s + len, 2723 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg, 2724 i, (u64)octeon_read_csr64(oct, reg)); 2725 } 2726 2727 /*0x10020*/ 2728 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2729 reg = CN23XX_SLI_IQ_DOORBELL(i); 2730 len += sprintf( 2731 s + len, 2732 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2733 reg, i, (u64)octeon_read_csr64(oct, reg)); 2734 } 2735 2736 /*0x10030*/ 2737 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2738 reg = CN23XX_SLI_IQ_SIZE(i); 2739 len += sprintf( 2740 s + len, 2741 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2742 reg, i, (u64)octeon_read_csr64(oct, reg)); 2743 } 2744 2745 /*0x10040*/ 2746 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) 2747 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2748 len += sprintf(s + len, 2749 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2750 reg, i, (u64)octeon_read_csr64(oct, reg)); 2751 } 2752 2753 return len; 2754 } 2755 2756 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct) 2757 { 2758 int len = 0; 2759 u32 reg; 2760 int i; 2761 2762 /* PCI Window Registers */ 2763 2764 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2765 2766 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2767 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i); 2768 len += sprintf(s + len, 2769 "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2770 reg, i, (u64)octeon_read_csr64(oct, reg)); 2771 } 2772 2773 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2774 reg = 
CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2775 len += sprintf(s + len, 2776 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2777 reg, i, (u64)octeon_read_csr64(oct, reg)); 2778 } 2779 2780 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2781 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); 2782 len += sprintf(s + len, 2783 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2784 reg, i, (u64)octeon_read_csr64(oct, reg)); 2785 } 2786 2787 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2788 reg = CN23XX_VF_SLI_OQ_SIZE(i); 2789 len += sprintf(s + len, 2790 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2791 reg, i, (u64)octeon_read_csr64(oct, reg)); 2792 } 2793 2794 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2795 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); 2796 len += sprintf(s + len, 2797 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2798 reg, i, (u64)octeon_read_csr64(oct, reg)); 2799 } 2800 2801 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2802 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); 2803 len += sprintf(s + len, 2804 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2805 reg, i, (u64)octeon_read_csr64(oct, reg)); 2806 } 2807 2808 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2809 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); 2810 len += sprintf(s + len, 2811 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2812 reg, i, (u64)octeon_read_csr64(oct, reg)); 2813 } 2814 2815 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2816 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); 2817 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2818 reg, i, (u64)octeon_read_csr64(oct, reg)); 2819 } 2820 2821 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2822 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; 2823 len += sprintf(s + len, 2824 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2825 reg, i, (u64)octeon_read_csr64(oct, reg)); 2826 } 2827 2828 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2829 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; 2830 
len += sprintf(s + len, 2831 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", 2832 reg, i, (u64)octeon_read_csr64(oct, reg)); 2833 } 2834 2835 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2836 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); 2837 len += sprintf(s + len, 2838 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2839 reg, i, (u64)octeon_read_csr64(oct, reg)); 2840 } 2841 2842 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2843 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); 2844 len += sprintf(s + len, 2845 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", 2846 reg, i, (u64)octeon_read_csr64(oct, reg)); 2847 } 2848 2849 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2850 reg = CN23XX_VF_SLI_IQ_DOORBELL(i); 2851 len += sprintf(s + len, 2852 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2853 reg, i, (u64)octeon_read_csr64(oct, reg)); 2854 } 2855 2856 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2857 reg = CN23XX_VF_SLI_IQ_SIZE(i); 2858 len += sprintf(s + len, 2859 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2860 reg, i, (u64)octeon_read_csr64(oct, reg)); 2861 } 2862 2863 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2864 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2865 len += sprintf(s + len, 2866 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2867 reg, i, (u64)octeon_read_csr64(oct, reg)); 2868 } 2869 2870 return len; 2871 } 2872 2873 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) 2874 { 2875 u32 reg; 2876 int i, len = 0; 2877 2878 /* PCI Window Registers */ 2879 2880 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2881 reg = CN6XXX_WIN_WR_ADDR_LO; 2882 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", 2883 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); 2884 reg = CN6XXX_WIN_WR_ADDR_HI; 2885 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", 2886 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); 2887 reg = CN6XXX_WIN_RD_ADDR_LO; 2888 len += sprintf(s + len, "[%02x] 
(WIN_RD_ADDR_LO): %08x\n", 2889 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); 2890 reg = CN6XXX_WIN_RD_ADDR_HI; 2891 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", 2892 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); 2893 reg = CN6XXX_WIN_WR_DATA_LO; 2894 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", 2895 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); 2896 reg = CN6XXX_WIN_WR_DATA_HI; 2897 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", 2898 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); 2899 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", 2900 CN6XXX_WIN_WR_MASK_REG, 2901 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); 2902 2903 /* PCI Interrupt Register */ 2904 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", 2905 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, 2906 CN6XXX_SLI_INT_ENB64_PORT0)); 2907 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", 2908 CN6XXX_SLI_INT_ENB64_PORT1, 2909 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); 2910 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, 2911 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); 2912 2913 /* PCI Output queue registers */ 2914 for (i = 0; i < oct->num_oqs; i++) { 2915 reg = CN6XXX_SLI_OQ_PKTS_SENT(i); 2916 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", 2917 reg, i, octeon_read_csr(oct, reg)); 2918 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); 2919 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", 2920 reg, i, octeon_read_csr(oct, reg)); 2921 } 2922 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; 2923 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", 2924 reg, octeon_read_csr(oct, reg)); 2925 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; 2926 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", 2927 reg, octeon_read_csr(oct, reg)); 2928 2929 /* PCI Input queue registers */ 2930 for (i = 0; i <= 3; i++) { 2931 u32 reg; 2932 2933 reg = CN6XXX_SLI_IQ_DOORBELL(i); 2934 len += sprintf(s + len, 
"\n[%x] (INSTR_DOORBELL_%d): %08x\n", 2935 reg, i, octeon_read_csr(oct, reg)); 2936 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); 2937 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", 2938 reg, i, octeon_read_csr(oct, reg)); 2939 } 2940 2941 /* PCI DMA registers */ 2942 2943 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n", 2944 CN6XXX_DMA_CNT(0), 2945 octeon_read_csr(oct, CN6XXX_DMA_CNT(0))); 2946 reg = CN6XXX_DMA_PKT_INT_LEVEL(0); 2947 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n", 2948 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg)); 2949 reg = CN6XXX_DMA_TIME_INT_LEVEL(0); 2950 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n", 2951 CN6XXX_DMA_TIME_INT_LEVEL(0), 2952 octeon_read_csr(oct, reg)); 2953 2954 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n", 2955 CN6XXX_DMA_CNT(1), 2956 octeon_read_csr(oct, CN6XXX_DMA_CNT(1))); 2957 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 2958 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n", 2959 CN6XXX_DMA_PKT_INT_LEVEL(1), 2960 octeon_read_csr(oct, reg)); 2961 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 2962 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n", 2963 CN6XXX_DMA_TIME_INT_LEVEL(1), 2964 octeon_read_csr(oct, reg)); 2965 2966 /* PCI Index registers */ 2967 2968 len += sprintf(s + len, "\n"); 2969 2970 for (i = 0; i < 16; i++) { 2971 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port)); 2972 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n", 2973 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg); 2974 } 2975 2976 return len; 2977 } 2978 2979 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct) 2980 { 2981 u32 val; 2982 int i, len = 0; 2983 2984 /* PCI CONFIG Registers */ 2985 2986 len += sprintf(s + len, 2987 "\n\t Octeon Config space Registers\n\n"); 2988 2989 for (i = 0; i <= 13; i++) { 2990 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 2991 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 2992 (i * 4), i, val); 2993 } 2994 2995 for (i = 30; i <= 34; i++) { 
2996 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 2997 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 2998 (i * 4), i, val); 2999 } 3000 3001 return len; 3002 } 3003 3004 /* Return register dump user app. */ 3005 static void lio_get_regs(struct net_device *dev, 3006 struct ethtool_regs *regs, void *regbuf) 3007 { 3008 struct lio *lio = GET_LIO(dev); 3009 int len = 0; 3010 struct octeon_device *oct = lio->oct_dev; 3011 3012 regs->version = OCT_ETHTOOL_REGSVER; 3013 3014 switch (oct->chip_id) { 3015 case OCTEON_CN23XX_PF_VID: 3016 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); 3017 len += cn23xx_read_csr_reg(regbuf + len, oct); 3018 break; 3019 case OCTEON_CN23XX_VF_VID: 3020 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); 3021 len += cn23xx_vf_read_csr_reg(regbuf + len, oct); 3022 break; 3023 case OCTEON_CN68XX: 3024 case OCTEON_CN66XX: 3025 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); 3026 len += cn6xxx_read_csr_reg(regbuf + len, oct); 3027 len += cn6xxx_read_config_reg(regbuf + len, oct); 3028 break; 3029 default: 3030 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", 3031 __func__, oct->chip_id); 3032 } 3033 } 3034 3035 static u32 lio_get_priv_flags(struct net_device *netdev) 3036 { 3037 struct lio *lio = GET_LIO(netdev); 3038 3039 return lio->oct_dev->priv_flags; 3040 } 3041 3042 static int lio_set_priv_flags(struct net_device *netdev, u32 flags) 3043 { 3044 struct lio *lio = GET_LIO(netdev); 3045 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); 3046 3047 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, 3048 intr_by_tx_bytes); 3049 return 0; 3050 } 3051 3052 static int lio_get_fecparam(struct net_device *netdev, 3053 struct ethtool_fecparam *fec) 3054 { 3055 struct lio *lio = GET_LIO(netdev); 3056 struct octeon_device *oct = lio->oct_dev; 3057 3058 fec->active_fec = ETHTOOL_FEC_NONE; 3059 fec->fec = ETHTOOL_FEC_NONE; 3060 3061 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 3062 oct->subsystem_id == 
OCTEON_CN2360_25GB_SUBSYS_ID) { 3063 if (oct->no_speed_setting == 1) 3064 return 0; 3065 3066 liquidio_get_fec(lio); 3067 fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF); 3068 if (oct->props[lio->ifidx].fec == 1) 3069 fec->active_fec = ETHTOOL_FEC_RS; 3070 else 3071 fec->active_fec = ETHTOOL_FEC_OFF; 3072 } 3073 3074 return 0; 3075 } 3076 3077 static int lio_set_fecparam(struct net_device *netdev, 3078 struct ethtool_fecparam *fec) 3079 { 3080 struct lio *lio = GET_LIO(netdev); 3081 struct octeon_device *oct = lio->oct_dev; 3082 3083 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 3084 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 3085 if (oct->no_speed_setting == 1) 3086 return -EOPNOTSUPP; 3087 3088 if (fec->fec & ETHTOOL_FEC_OFF) 3089 liquidio_set_fec(lio, 0); 3090 else if (fec->fec & ETHTOOL_FEC_RS) 3091 liquidio_set_fec(lio, 1); 3092 else 3093 return -EOPNOTSUPP; 3094 } else { 3095 return -EOPNOTSUPP; 3096 } 3097 3098 return 0; 3099 } 3100 3101 #define LIO_ETHTOOL_COALESCE (ETHTOOL_COALESCE_RX_USECS | \ 3102 ETHTOOL_COALESCE_MAX_FRAMES | \ 3103 ETHTOOL_COALESCE_USE_ADAPTIVE | \ 3104 ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \ 3105 ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \ 3106 ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \ 3107 ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH | \ 3108 ETHTOOL_COALESCE_PKT_RATE_RX_USECS) 3109 3110 static const struct ethtool_ops lio_ethtool_ops = { 3111 .supported_coalesce_params = LIO_ETHTOOL_COALESCE, 3112 .get_link_ksettings = lio_get_link_ksettings, 3113 .set_link_ksettings = lio_set_link_ksettings, 3114 .get_fecparam = lio_get_fecparam, 3115 .set_fecparam = lio_set_fecparam, 3116 .get_link = ethtool_op_get_link, 3117 .get_drvinfo = lio_get_drvinfo, 3118 .get_ringparam = lio_ethtool_get_ringparam, 3119 .set_ringparam = lio_ethtool_set_ringparam, 3120 .get_channels = lio_ethtool_get_channels, 3121 .set_channels = lio_ethtool_set_channels, 3122 .set_phys_id = lio_set_phys_id, 3123 .get_eeprom_len = lio_get_eeprom_len, 3124 .get_eeprom = 
lio_get_eeprom, 3125 .get_strings = lio_get_strings, 3126 .get_ethtool_stats = lio_get_ethtool_stats, 3127 .get_pauseparam = lio_get_pauseparam, 3128 .set_pauseparam = lio_set_pauseparam, 3129 .get_regs_len = lio_get_regs_len, 3130 .get_regs = lio_get_regs, 3131 .get_msglevel = lio_get_msglevel, 3132 .set_msglevel = lio_set_msglevel, 3133 .get_sset_count = lio_get_sset_count, 3134 .get_coalesce = lio_get_intr_coalesce, 3135 .set_coalesce = lio_set_intr_coalesce, 3136 .get_priv_flags = lio_get_priv_flags, 3137 .set_priv_flags = lio_set_priv_flags, 3138 .get_ts_info = lio_get_ts_info, 3139 }; 3140 3141 static const struct ethtool_ops lio_vf_ethtool_ops = { 3142 .supported_coalesce_params = LIO_ETHTOOL_COALESCE, 3143 .get_link_ksettings = lio_get_link_ksettings, 3144 .get_link = ethtool_op_get_link, 3145 .get_drvinfo = lio_get_vf_drvinfo, 3146 .get_ringparam = lio_ethtool_get_ringparam, 3147 .set_ringparam = lio_ethtool_set_ringparam, 3148 .get_channels = lio_ethtool_get_channels, 3149 .set_channels = lio_ethtool_set_channels, 3150 .get_strings = lio_vf_get_strings, 3151 .get_ethtool_stats = lio_vf_get_ethtool_stats, 3152 .get_regs_len = lio_get_regs_len, 3153 .get_regs = lio_get_regs, 3154 .get_msglevel = lio_get_msglevel, 3155 .set_msglevel = lio_vf_set_msglevel, 3156 .get_sset_count = lio_vf_get_sset_count, 3157 .get_coalesce = lio_get_intr_coalesce, 3158 .set_coalesce = lio_set_intr_coalesce, 3159 .get_priv_flags = lio_get_priv_flags, 3160 .set_priv_flags = lio_set_priv_flags, 3161 .get_ts_info = lio_get_ts_info, 3162 }; 3163 3164 void liquidio_set_ethtool_ops(struct net_device *netdev) 3165 { 3166 struct lio *lio = GET_LIO(netdev); 3167 struct octeon_device *oct = lio->oct_dev; 3168 3169 if (OCTEON_CN23XX_VF(oct)) 3170 netdev->ethtool_ops = &lio_vf_ethtool_ops; 3171 else 3172 netdev->ethtool_ops = &lio_ethtool_ops; 3173 } 3174