1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/netdevice.h> 19 #include <linux/net_tstamp.h> 20 #include <linux/pci.h> 21 #include "liquidio_common.h" 22 #include "octeon_droq.h" 23 #include "octeon_iq.h" 24 #include "response_manager.h" 25 #include "octeon_device.h" 26 #include "octeon_nic.h" 27 #include "octeon_main.h" 28 #include "octeon_network.h" 29 #include "cn66xx_regs.h" 30 #include "cn66xx_device.h" 31 #include "cn23xx_pf_device.h" 32 #include "cn23xx_vf_device.h" 33 34 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); 35 36 struct oct_intrmod_context { 37 int octeon_id; 38 wait_queue_head_t wc; 39 int cond; 40 int status; 41 }; 42 43 struct oct_intrmod_resp { 44 u64 rh; 45 struct oct_intrmod_cfg intrmod; 46 u64 status; 47 }; 48 49 struct oct_mdio_cmd_context { 50 int octeon_id; 51 wait_queue_head_t wc; 52 int cond; 53 }; 54 55 struct oct_mdio_cmd_resp { 56 u64 rh; 57 struct oct_mdio_cmd resp; 58 u64 status; 59 }; 60 61 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp)) 62 63 /* Octeon's interface mode of operation */ 64 enum { 65 INTERFACE_MODE_DISABLED, 66 INTERFACE_MODE_RGMII, 67 INTERFACE_MODE_GMII, 68 INTERFACE_MODE_SPI, 69 INTERFACE_MODE_PCIE, 70 INTERFACE_MODE_XAUI, 71 INTERFACE_MODE_SGMII, 72 INTERFACE_MODE_PICMG, 73 INTERFACE_MODE_NPI, 74 INTERFACE_MODE_LOOP, 75 INTERFACE_MODE_SRIO, 76 INTERFACE_MODE_ILK, 77 INTERFACE_MODE_RXAUI, 78 INTERFACE_MODE_QSGMII, 79 INTERFACE_MODE_AGL, 80 INTERFACE_MODE_XLAUI, 81 INTERFACE_MODE_XFI, 82 INTERFACE_MODE_10G_KR, 83 INTERFACE_MODE_40G_KR4, 84 INTERFACE_MODE_MIXED, 85 }; 86 87 #define OCT_ETHTOOL_REGDUMP_LEN 4096 88 #define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11) 89 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2) 90 #define OCT_ETHTOOL_REGSVER 1 91 92 /* statistics of PF */ 93 static const char oct_stats_strings[][ETH_GSTRING_LEN] = { 94 "rx_packets", 95 "tx_packets", 96 "rx_bytes", 97 "tx_bytes", 98 "rx_errors", 99 "tx_errors", 100 "rx_dropped", 101 "tx_dropped", 102 103 "tx_total_sent", 104 "tx_total_fwd", 105 "tx_err_pko", 106 "tx_err_pki", 107 "tx_err_link", 108 "tx_err_drop", 109 110 "tx_tso", 111 "tx_tso_packets", 112 "tx_tso_err", 113 "tx_vxlan", 114 115 "tx_mcast", 116 "tx_bcast", 117 118 "mac_tx_total_pkts", 119 "mac_tx_total_bytes", 120 "mac_tx_mcast_pkts", 121 "mac_tx_bcast_pkts", 122 "mac_tx_ctl_packets", 123 "mac_tx_total_collisions", 124 "mac_tx_one_collision", 125 "mac_tx_multi_collision", 126 "mac_tx_max_collision_fail", 127 "mac_tx_max_deferal_fail", 128 "mac_tx_fifo_err", 129 "mac_tx_runts", 130 131 "rx_total_rcvd", 132 "rx_total_fwd", 133 "rx_mcast", 134 "rx_bcast", 135 "rx_jabber_err", 136 "rx_l2_err", 137 "rx_frame_err", 138 "rx_err_pko", 139 "rx_err_link", 140 "rx_err_drop", 141 142 "rx_vxlan", 143 "rx_vxlan_err", 144 145 "rx_lro_pkts", 146 "rx_lro_bytes", 147 "rx_total_lro", 148 149 "rx_lro_aborts", 150 "rx_lro_aborts_port", 151 "rx_lro_aborts_seq", 152 "rx_lro_aborts_tsval", 153 "rx_lro_aborts_timer", 154 "rx_fwd_rate", 155 156 "mac_rx_total_rcvd", 157 "mac_rx_bytes", 158 "mac_rx_total_bcst", 159 "mac_rx_total_mcst", 160 "mac_rx_runts", 161 "mac_rx_ctl_packets", 162 "mac_rx_fifo_err", 163 "mac_rx_dma_drop", 164 "mac_rx_fcs_err", 165 166 "link_state_changes", 167 }; 168 169 /* statistics of VF */ 170 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { 171 "rx_packets", 172 "tx_packets", 173 "rx_bytes", 174 "tx_bytes", 175 "rx_errors", 176 "tx_errors", 177 "rx_dropped", 178 "tx_dropped", 179 "rx_mcast", 180 "tx_mcast", 181 "rx_bcast", 182 "tx_bcast", 183 "link_state_changes", 184 }; 185 186 /* statistics of host tx queue */ 187 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 188 "packets", 189 "bytes", 190 "dropped", 191 "iq_busy", 192 "sgentry_sent", 193 194 "fw_instr_posted", 195 "fw_instr_processed", 196 "fw_instr_dropped", 197 "fw_bytes_sent", 198 199 "tso", 200 "vxlan", 201 "txq_restart", 202 }; 203 204 /* statistics of host rx queue */ 205 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 206 "packets", 207 "bytes", 208 "dropped", 209 "dropped_nomem", 210 "dropped_toomany", 211 "fw_dropped", 212 "fw_pkts_received", 213 "fw_bytes_received", 214 "fw_dropped_nodispatch", 215 216 "vxlan", 217 "buffer_alloc_failure", 218 }; 219 220 /* LiquidIO driver private flags */ 221 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { 222 }; 223 224 #define OCTNIC_NCMD_AUTONEG_ON 0x1 225 #define OCTNIC_NCMD_PHY_ON 0x2 226 227 static int lio_get_link_ksettings(struct net_device *netdev, 228 struct ethtool_link_ksettings *ecmd) 229 { 230 struct lio *lio = GET_LIO(netdev); 231 struct octeon_device *oct = lio->oct_dev; 232 struct oct_link_info *linfo; 233 234 linfo = &lio->linfo; 235 236 ethtool_link_ksettings_zero_link_mode(ecmd, supported); 237 ethtool_link_ksettings_zero_link_mode(ecmd, advertising); 238 239 switch (linfo->link.s.phy_type) { 240 case LIO_PHY_PORT_TP: 241 ecmd->base.port = PORT_TP; 242 ecmd->base.autoneg = AUTONEG_DISABLE; 243 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); 244 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 245 ethtool_link_ksettings_add_link_mode(ecmd, supported, 246 10000baseT_Full); 247 248 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 249 ethtool_link_ksettings_add_link_mode(ecmd, advertising, 250 10000baseT_Full); 251 252 break; 253 254 case LIO_PHY_PORT_FIBRE: 255 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 256 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || 257 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || 258 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 259 dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); 260 } else { 261 dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", 262 linfo->link.s.if_mode); 263 } 264 265 ecmd->base.port = PORT_FIBRE; 266 ecmd->base.autoneg = AUTONEG_DISABLE; 267 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); 268 269 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 270 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 271 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 272 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 273 if (OCTEON_CN23XX_PF(oct)) { 274 ethtool_link_ksettings_add_link_mode 275 (ecmd, supported, 25000baseSR_Full); 276 ethtool_link_ksettings_add_link_mode 277 (ecmd, supported, 25000baseKR_Full); 278 ethtool_link_ksettings_add_link_mode 279 (ecmd, supported, 25000baseCR_Full); 280 281 if (oct->no_speed_setting == 0) { 282 ethtool_link_ksettings_add_link_mode 283 (ecmd, supported, 284 10000baseSR_Full); 285 ethtool_link_ksettings_add_link_mode 286 (ecmd, supported, 287 10000baseKR_Full); 288 ethtool_link_ksettings_add_link_mode 289 (ecmd, supported, 290 10000baseCR_Full); 291 } 292 293 if (oct->no_speed_setting == 0) 294 liquidio_get_speed(lio); 295 else 296 oct->speed_setting = 25; 297 298 if (oct->speed_setting == 10) { 299 ethtool_link_ksettings_add_link_mode 300 (ecmd, advertising, 301 10000baseSR_Full); 302 ethtool_link_ksettings_add_link_mode 303 (ecmd, advertising, 304 10000baseKR_Full); 305 ethtool_link_ksettings_add_link_mode 306 (ecmd, advertising, 307 10000baseCR_Full); 308 } 309 if (oct->speed_setting == 25) { 310 ethtool_link_ksettings_add_link_mode 311 (ecmd, advertising, 312 25000baseSR_Full); 313 ethtool_link_ksettings_add_link_mode 314 (ecmd, advertising, 315 25000baseKR_Full); 316 ethtool_link_ksettings_add_link_mode 317 (ecmd, advertising, 318 25000baseCR_Full); 319 } 320 } else { /* VF */ 321 if (linfo->link.s.speed == 10000) { 322 ethtool_link_ksettings_add_link_mode 323 (ecmd, supported, 324 10000baseSR_Full); 325 ethtool_link_ksettings_add_link_mode 326 (ecmd, supported, 327 10000baseKR_Full); 328 ethtool_link_ksettings_add_link_mode 329 (ecmd, supported, 330 10000baseCR_Full); 331 332 ethtool_link_ksettings_add_link_mode 333 (ecmd, advertising, 334 10000baseSR_Full); 335 ethtool_link_ksettings_add_link_mode 336 (ecmd, advertising, 337 10000baseKR_Full); 338 ethtool_link_ksettings_add_link_mode 339 (ecmd, advertising, 340 10000baseCR_Full); 341 } 342 343 if (linfo->link.s.speed == 25000) { 344 ethtool_link_ksettings_add_link_mode 345 (ecmd, supported, 346 25000baseSR_Full); 347 ethtool_link_ksettings_add_link_mode 348 (ecmd, supported, 349 25000baseKR_Full); 350 ethtool_link_ksettings_add_link_mode 351 (ecmd, supported, 352 25000baseCR_Full); 353 354 ethtool_link_ksettings_add_link_mode 355 (ecmd, advertising, 356 25000baseSR_Full); 357 ethtool_link_ksettings_add_link_mode 358 (ecmd, advertising, 359 25000baseKR_Full); 360 ethtool_link_ksettings_add_link_mode 361 (ecmd, advertising, 362 25000baseCR_Full); 363 } 364 } 365 } else { 366 ethtool_link_ksettings_add_link_mode(ecmd, supported, 367 10000baseT_Full); 368 ethtool_link_ksettings_add_link_mode(ecmd, advertising, 369 10000baseT_Full); 370 } 371 break; 372 } 373 374 if (linfo->link.s.link_up) { 375 ecmd->base.speed = linfo->link.s.speed; 376 ecmd->base.duplex = linfo->link.s.duplex; 377 } else { 378 ecmd->base.speed = SPEED_UNKNOWN; 379 ecmd->base.duplex = DUPLEX_UNKNOWN; 380 } 381 382 return 0; 383 } 384 385 static int lio_set_link_ksettings(struct net_device *netdev, 386 const struct ethtool_link_ksettings *ecmd) 387 { 388 const int speed = ecmd->base.speed; 389 struct lio *lio = GET_LIO(netdev); 390 struct oct_link_info *linfo; 391 struct octeon_device *oct; 392 393 oct = lio->oct_dev; 394 395 linfo = &lio->linfo; 396 397 if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 398 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID)) 399 return -EOPNOTSUPP; 400 401 if (oct->no_speed_setting) { 402 dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n", 403 __func__); 404 return -EOPNOTSUPP; 405 } 406 407 if ((ecmd->base.duplex != DUPLEX_UNKNOWN && 408 ecmd->base.duplex != linfo->link.s.duplex) || 409 ecmd->base.autoneg != AUTONEG_DISABLE || 410 (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 && 411 ecmd->base.speed != SPEED_UNKNOWN)) 412 return -EOPNOTSUPP; 413 414 if ((oct->speed_boot == speed / 1000) && 415 oct->speed_boot == oct->speed_setting) 416 return 0; 417 418 liquidio_set_speed(lio, speed / 1000); 419 420 dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n", 421 oct->speed_setting); 422 423 return 0; 424 } 425 426 static void 427 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 428 { 429 struct lio *lio; 430 struct octeon_device *oct; 431 432 lio = GET_LIO(netdev); 433 oct = lio->oct_dev; 434 435 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 436 strcpy(drvinfo->driver, "liquidio"); 437 strcpy(drvinfo->version, LIQUIDIO_VERSION); 438 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 439 ETHTOOL_FWVERS_LEN); 440 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 441 } 442 443 static void 444 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 445 { 446 struct octeon_device *oct; 447 struct lio *lio; 448 449 lio = GET_LIO(netdev); 450 oct = lio->oct_dev; 451 452 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 453 strcpy(drvinfo->driver, "liquidio_vf"); 454 strcpy(drvinfo->version, LIQUIDIO_VERSION); 455 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 456 ETHTOOL_FWVERS_LEN); 457 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 458 } 459 460 static int 461 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) 462 { 463 struct lio *lio = GET_LIO(netdev); 464 struct octeon_device *oct = lio->oct_dev; 465 struct octnic_ctrl_pkt nctrl; 466 int ret = 0; 467 468 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 469 470 nctrl.ncmd.u64 = 0; 471 nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL; 472 nctrl.ncmd.s.param1 = num_queues; 473 nctrl.ncmd.s.param2 = num_queues; 474 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 475 nctrl.wait_time = 100; 476 nctrl.netpndev = (u64)netdev; 477 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 478 479 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 480 if (ret < 0) { 481 dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", 482 ret); 483 return -1; 484 } 485 486 return 0; 487 } 488 489 static void 490 lio_ethtool_get_channels(struct net_device *dev, 491 struct ethtool_channels *channel) 492 { 493 struct lio *lio = GET_LIO(dev); 494 struct octeon_device *oct = lio->oct_dev; 495 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; 496 u32 combined_count = 0, max_combined = 0; 497 498 if (OCTEON_CN6XXX(oct)) { 499 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 500 501 max_rx = CFG_GET_OQ_MAX_Q(conf6x); 502 max_tx = CFG_GET_IQ_MAX_Q(conf6x); 503 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); 504 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); 505 } else if (OCTEON_CN23XX_PF(oct)) { 506 if (oct->sriov_info.sriov_enabled) { 507 max_combined = lio->linfo.num_txpciq; 508 } else { 509 struct octeon_config *conf23_pf = 510 CHIP_CONF(oct, cn23xx_pf); 511 512 max_combined = CFG_GET_IQ_MAX_Q(conf23_pf); 513 } 514 combined_count = oct->num_iqs; 515 } else if (OCTEON_CN23XX_VF(oct)) { 516 u64 reg_val = 0ULL; 517 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); 518 519 reg_val = octeon_read_csr64(oct, ctrl); 520 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; 521 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; 522 combined_count = oct->num_iqs; 523 } 524 525 channel->max_rx = max_rx; 526 channel->max_tx = max_tx; 527 channel->max_combined = max_combined; 528 channel->rx_count = rx_count; 529 channel->tx_count = tx_count; 530 channel->combined_count = combined_count; 531 } 532 533 static int 534 lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) 535 { 536 struct msix_entry *msix_entries; 537 int num_msix_irqs = 0; 538 int i; 539 540 if (!oct->msix_on) 541 return 0; 542 543 /* Disable the input and output queues now. No more packets will 544 * arrive from Octeon. 545 */ 546 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 547 548 if (oct->msix_on) { 549 if (OCTEON_CN23XX_PF(oct)) 550 num_msix_irqs = oct->num_msix_irqs - 1; 551 else if (OCTEON_CN23XX_VF(oct)) 552 num_msix_irqs = oct->num_msix_irqs; 553 554 msix_entries = (struct msix_entry *)oct->msix_entries; 555 for (i = 0; i < num_msix_irqs; i++) { 556 if (oct->ioq_vector[i].vector) { 557 /* clear the affinity_cpumask */ 558 irq_set_affinity_hint(msix_entries[i].vector, 559 NULL); 560 free_irq(msix_entries[i].vector, 561 &oct->ioq_vector[i]); 562 oct->ioq_vector[i].vector = 0; 563 } 564 } 565 566 /* non-iov vector's argument is oct struct */ 567 if (OCTEON_CN23XX_PF(oct)) 568 free_irq(msix_entries[i].vector, oct); 569 570 pci_disable_msix(oct->pci_dev); 571 kfree(oct->msix_entries); 572 oct->msix_entries = NULL; 573 } 574 575 kfree(oct->irq_name_storage); 576 oct->irq_name_storage = NULL; 577 578 if (octeon_allocate_ioq_vector(oct, num_ioqs)) { 579 dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); 580 return -1; 581 } 582 583 if (octeon_setup_interrupt(oct, num_ioqs)) { 584 dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); 585 return -1; 586 } 587 588 /* Enable Octeon device interrupts */ 589 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 590 591 return 0; 592 } 593 594 static int 595 lio_ethtool_set_channels(struct net_device *dev, 596 struct ethtool_channels *channel) 597 { 598 u32 combined_count, max_combined; 599 struct lio *lio = GET_LIO(dev); 600 struct octeon_device *oct = lio->oct_dev; 601 int stopped = 0; 602 603 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) { 604 dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n"); 605 return -EINVAL; 606 } 607 608 if (!channel->combined_count || channel->other_count || 609 channel->rx_count || channel->tx_count) 610 return -EINVAL; 611 612 combined_count = channel->combined_count; 613 614 if (OCTEON_CN23XX_PF(oct)) { 615 if (oct->sriov_info.sriov_enabled) { 616 max_combined = lio->linfo.num_txpciq; 617 } else { 618 struct octeon_config *conf23_pf = 619 CHIP_CONF(oct, 620 cn23xx_pf); 621 622 max_combined = 623 CFG_GET_IQ_MAX_Q(conf23_pf); 624 } 625 } else if (OCTEON_CN23XX_VF(oct)) { 626 u64 reg_val = 0ULL; 627 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); 628 629 reg_val = octeon_read_csr64(oct, ctrl); 630 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; 631 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; 632 } else { 633 return -EINVAL; 634 } 635 636 if (combined_count > max_combined || combined_count < 1) 637 return -EINVAL; 638 639 if (combined_count == oct->num_iqs) 640 return 0; 641 642 ifstate_set(lio, LIO_IFSTATE_RESETTING); 643 644 if (netif_running(dev)) { 645 dev->netdev_ops->ndo_stop(dev); 646 stopped = 1; 647 } 648 649 if (lio_reset_queues(dev, combined_count)) 650 return -EINVAL; 651 652 if (stopped) 653 dev->netdev_ops->ndo_open(dev); 654 655 ifstate_reset(lio, LIO_IFSTATE_RESETTING); 656 657 return 0; 658 } 659 660 static int lio_get_eeprom_len(struct net_device *netdev) 661 { 662 u8 buf[192]; 663 struct lio *lio = GET_LIO(netdev); 664 struct octeon_device *oct_dev = lio->oct_dev; 665 struct octeon_board_info *board_info; 666 int len; 667 668 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 669 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n", 670 board_info->name, board_info->serial_number, 671 board_info->major, board_info->minor); 672 673 return len; 674 } 675 676 static int 677 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 678 u8 *bytes) 679 { 680 struct lio *lio = GET_LIO(netdev); 681 struct octeon_device *oct_dev = lio->oct_dev; 682 struct octeon_board_info *board_info; 683 684 if (eeprom->offset) 685 return -EINVAL; 686 687 eeprom->magic = oct_dev->pci_dev->vendor; 688 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 689 sprintf((char *)bytes, 690 "boardname:%s serialnum:%s maj:%lld min:%lld\n", 691 board_info->name, board_info->serial_number, 692 board_info->major, board_info->minor); 693 694 return 0; 695 } 696 697 static int octnet_gpio_access(struct net_device *netdev, int addr, int val) 698 { 699 struct lio *lio = GET_LIO(netdev); 700 struct octeon_device *oct = lio->oct_dev; 701 struct octnic_ctrl_pkt nctrl; 702 int ret = 0; 703 704 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 705 706 nctrl.ncmd.u64 = 0; 707 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; 708 nctrl.ncmd.s.param1 = addr; 709 nctrl.ncmd.s.param2 = val; 710 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 711 nctrl.wait_time = 100; 712 nctrl.netpndev = (u64)netdev; 713 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 714 715 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 716 if (ret < 0) { 717 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 718 return -EINVAL; 719 } 720 721 return 0; 722 } 723 724 static int octnet_id_active(struct net_device *netdev, int val) 725 { 726 struct lio *lio = GET_LIO(netdev); 727 struct octeon_device *oct = lio->oct_dev; 728 struct octnic_ctrl_pkt nctrl; 729 int ret = 0; 730 731 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 732 733 nctrl.ncmd.u64 = 0; 734 nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; 735 nctrl.ncmd.s.param1 = val; 736 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 737 nctrl.wait_time = 100; 738 nctrl.netpndev = (u64)netdev; 739 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 740 741 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 742 if (ret < 0) { 743 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 744 return -EINVAL; 745 } 746 747 return 0; 748 } 749 750 /* Callback for when mdio command response arrives 751 */ 752 static void octnet_mdio_resp_callback(struct octeon_device *oct, 753 u32 status, 754 void *buf) 755 { 756 struct oct_mdio_cmd_context *mdio_cmd_ctx; 757 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 758 759 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 760 761 oct = lio_get_device(mdio_cmd_ctx->octeon_id); 762 if (status) { 763 dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n", 764 CVM_CAST64(status)); 765 WRITE_ONCE(mdio_cmd_ctx->cond, -1); 766 } else { 767 WRITE_ONCE(mdio_cmd_ctx->cond, 1); 768 } 769 wake_up_interruptible(&mdio_cmd_ctx->wc); 770 } 771 772 /* This routine provides PHY access routines for 773 * mdio clause45 . 774 */ 775 static int 776 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) 777 { 778 struct octeon_device *oct_dev = lio->oct_dev; 779 struct octeon_soft_command *sc; 780 struct oct_mdio_cmd_resp *mdio_cmd_rsp; 781 struct oct_mdio_cmd_context *mdio_cmd_ctx; 782 struct oct_mdio_cmd *mdio_cmd; 783 int retval = 0; 784 785 sc = (struct octeon_soft_command *) 786 octeon_alloc_soft_command(oct_dev, 787 sizeof(struct oct_mdio_cmd), 788 sizeof(struct oct_mdio_cmd_resp), 789 sizeof(struct oct_mdio_cmd_context)); 790 791 if (!sc) 792 return -ENOMEM; 793 794 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 795 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; 796 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; 797 798 WRITE_ONCE(mdio_cmd_ctx->cond, 0); 799 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); 800 mdio_cmd->op = op; 801 mdio_cmd->mdio_addr = loc; 802 if (op) 803 mdio_cmd->value1 = *value; 804 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); 805 806 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 807 808 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 809 0, 0, 0); 810 811 sc->wait_time = 1000; 812 sc->callback = octnet_mdio_resp_callback; 813 sc->callback_arg = sc; 814 815 init_waitqueue_head(&mdio_cmd_ctx->wc); 816 817 retval = octeon_send_soft_command(oct_dev, sc); 818 819 if (retval == IQ_SEND_FAILED) { 820 dev_err(&oct_dev->pci_dev->dev, 821 "octnet_mdio45_access instruction failed status: %x\n", 822 retval); 823 retval = -EBUSY; 824 } else { 825 /* Sleep on a wait queue till the cond flag indicates that the 826 * response arrived 827 */ 828 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); 829 retval = mdio_cmd_rsp->status; 830 if (retval) { 831 dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n"); 832 retval = -EBUSY; 833 } else { 834 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), 835 sizeof(struct oct_mdio_cmd) / 8); 836 837 if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { 838 if (!op) 839 *value = mdio_cmd_rsp->resp.value1; 840 } else { 841 retval = -EINVAL; 842 } 843 } 844 } 845 846 octeon_free_soft_command(oct_dev, sc); 847 848 return retval; 849 } 850 851 static int lio_set_phys_id(struct net_device *netdev, 852 enum ethtool_phys_id_state state) 853 { 854 struct lio *lio = GET_LIO(netdev); 855 struct octeon_device *oct = lio->oct_dev; 856 struct oct_link_info *linfo; 857 int value, ret; 858 u32 cur_ver; 859 860 linfo = &lio->linfo; 861 cur_ver = OCT_FW_VER(oct->fw_info.ver.maj, 862 oct->fw_info.ver.min, 863 oct->fw_info.ver.rev); 864 865 switch (state) { 866 case ETHTOOL_ID_ACTIVE: 867 if (oct->chip_id == OCTEON_CN66XX) { 868 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 869 VITESSE_PHY_GPIO_DRIVEON); 870 return 2; 871 872 } else if (oct->chip_id == OCTEON_CN68XX) { 873 /* Save the current LED settings */ 874 ret = octnet_mdio45_access(lio, 0, 875 LIO68XX_LED_BEACON_ADDR, 876 &lio->phy_beacon_val); 877 if (ret) 878 return ret; 879 880 ret = octnet_mdio45_access(lio, 0, 881 LIO68XX_LED_CTRL_ADDR, 882 &lio->led_ctrl_val); 883 if (ret) 884 return ret; 885 886 /* Configure Beacon values */ 887 value = LIO68XX_LED_BEACON_CFGON; 888 ret = octnet_mdio45_access(lio, 1, 889 LIO68XX_LED_BEACON_ADDR, 890 &value); 891 if (ret) 892 return ret; 893 894 value = LIO68XX_LED_CTRL_CFGON; 895 ret = octnet_mdio45_access(lio, 1, 896 LIO68XX_LED_CTRL_ADDR, 897 &value); 898 if (ret) 899 return ret; 900 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 901 octnet_id_active(netdev, LED_IDENTIFICATION_ON); 902 if (linfo->link.s.phy_type == LIO_PHY_PORT_TP && 903 cur_ver > OCT_FW_VER(1, 7, 2)) 904 return 2; 905 else 906 return 0; 907 } else { 908 return -EINVAL; 909 } 910 break; 911 912 case ETHTOOL_ID_ON: 913 if (oct->chip_id == OCTEON_CN23XX_PF_VID && 914 linfo->link.s.phy_type == LIO_PHY_PORT_TP && 915 cur_ver > OCT_FW_VER(1, 7, 2)) 916 octnet_id_active(netdev, LED_IDENTIFICATION_ON); 917 else if (oct->chip_id == OCTEON_CN66XX) 918 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 919 VITESSE_PHY_GPIO_HIGH); 920 else 921 return -EINVAL; 922 923 break; 924 925 case ETHTOOL_ID_OFF: 926 if (oct->chip_id == OCTEON_CN23XX_PF_VID && 927 linfo->link.s.phy_type == LIO_PHY_PORT_TP && 928 cur_ver > OCT_FW_VER(1, 7, 2)) 929 octnet_id_active(netdev, LED_IDENTIFICATION_OFF); 930 else if (oct->chip_id == OCTEON_CN66XX) 931 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 932 VITESSE_PHY_GPIO_LOW); 933 else 934 return -EINVAL; 935 936 break; 937 938 case ETHTOOL_ID_INACTIVE: 939 if (oct->chip_id == OCTEON_CN66XX) { 940 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 941 VITESSE_PHY_GPIO_DRIVEOFF); 942 } else if (oct->chip_id == OCTEON_CN68XX) { 943 /* Restore LED settings */ 944 ret = octnet_mdio45_access(lio, 1, 945 LIO68XX_LED_CTRL_ADDR, 946 &lio->led_ctrl_val); 947 if (ret) 948 return ret; 949 950 ret = octnet_mdio45_access(lio, 1, 951 LIO68XX_LED_BEACON_ADDR, 952 &lio->phy_beacon_val); 953 if (ret) 954 return ret; 955 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 956 octnet_id_active(netdev, LED_IDENTIFICATION_OFF); 957 958 return 0; 959 } else { 960 return -EINVAL; 961 } 962 break; 963 964 default: 965 return -EINVAL; 966 } 967 968 return 0; 969 } 970 971 static void 972 lio_ethtool_get_ringparam(struct net_device *netdev, 973 struct ethtool_ringparam *ering) 974 { 975 struct lio *lio = GET_LIO(netdev); 976 struct octeon_device *oct = lio->oct_dev; 977 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, 978 rx_pending = 0; 979 980 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 981 return; 982 983 if (OCTEON_CN6XXX(oct)) { 984 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 985 986 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; 987 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; 988 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); 989 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); 990 } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { 991 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; 992 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; 993 rx_pending = oct->droq[0]->max_count; 994 tx_pending = oct->instr_queue[0]->max_count; 995 } 996 997 ering->tx_pending = tx_pending; 998 ering->tx_max_pending = tx_max_pending; 999 ering->rx_pending = rx_pending; 1000 ering->rx_max_pending = rx_max_pending; 1001 ering->rx_mini_pending = 0; 1002 ering->rx_jumbo_pending = 0; 1003 ering->rx_mini_max_pending = 0; 1004 ering->rx_jumbo_max_pending = 0; 1005 } 1006 1007 static int lio_23xx_reconfigure_queue_count(struct lio *lio) 1008 { 1009 struct octeon_device *oct = lio->oct_dev; 1010 struct liquidio_if_cfg_context *ctx; 1011 u32 resp_size, ctx_size, data_size; 1012 struct liquidio_if_cfg_resp *resp; 1013 struct octeon_soft_command *sc; 1014 union oct_nic_if_cfg if_cfg; 1015 struct lio_version *vdata; 1016 u32 ifidx_or_pfnum; 1017 int retval; 1018 int j; 1019 1020 resp_size = sizeof(struct liquidio_if_cfg_resp); 1021 ctx_size = sizeof(struct liquidio_if_cfg_context); 1022 data_size = sizeof(struct lio_version); 1023 sc = (struct octeon_soft_command *) 1024 octeon_alloc_soft_command(oct, data_size, 1025 resp_size, ctx_size); 1026 if (!sc) { 1027 dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", 1028 __func__); 1029 return -1; 1030 } 1031 1032 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1033 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 1034 vdata = (struct lio_version *)sc->virtdptr; 1035 1036 vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1037 vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1038 vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1039 1040 ifidx_or_pfnum = oct->pf_num; 1041 WRITE_ONCE(ctx->cond, 0); 1042 ctx->octeon_id = lio_get_device_id(oct); 1043 init_waitqueue_head(&ctx->wc); 1044 1045 if_cfg.u64 = 0; 1046 if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; 1047 if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings; 1048 if_cfg.s.base_queue = oct->sriov_info.pf_srn; 1049 if_cfg.s.gmx_port_id = oct->pf_num; 1050 1051 sc->iq_no = 0; 1052 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1053 OPCODE_NIC_QCOUNT_UPDATE, 0, 1054 if_cfg.u64, 0); 1055 sc->callback = lio_if_cfg_callback; 1056 sc->callback_arg = sc; 1057 sc->wait_time = LIO_IFCFG_WAIT_TIME; 1058 1059 retval = octeon_send_soft_command(oct, sc); 1060 if (retval == IQ_SEND_FAILED) { 1061 dev_err(&oct->pci_dev->dev, 1062 "iq/oq config failed status: %x\n", 1063 retval); 1064 goto qcount_update_fail; 1065 } 1066 1067 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 1068 dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); 1069 return -1; 1070 } 1071 1072 retval = resp->status; 1073 if (retval) { 1074 dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); 1075 goto qcount_update_fail; 1076 } 1077 1078 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 1079 (sizeof(struct liquidio_if_cfg_info)) >> 3); 1080 1081 lio->ifidx = ifidx_or_pfnum; 1082 lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask); 1083 lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask); 1084 for (j = 0; j < lio->linfo.num_rxpciq; j++) { 1085 lio->linfo.rxpciq[j].u64 = 1086 resp->cfg_info.linfo.rxpciq[j].u64; 1087 } 1088 1089 for (j = 0; j < lio->linfo.num_txpciq; j++) { 1090 lio->linfo.txpciq[j].u64 = 1091 resp->cfg_info.linfo.txpciq[j].u64; 1092 } 1093 1094 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 1095 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 1096 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 1097 lio->txq = lio->linfo.txpciq[0].s.q_no; 1098 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 1099 1100 octeon_free_soft_command(oct, sc); 1101 dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", 1102 lio->linfo.num_rxpciq); 1103 1104 return 0; 1105 1106 qcount_update_fail: 1107 octeon_free_soft_command(oct, sc); 1108 1109 return -1; 1110 } 1111 1112 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) 1113 { 1114 struct lio *lio = GET_LIO(netdev); 1115 struct octeon_device *oct = lio->oct_dev; 1116 int i, queue_count_update = 0; 1117 struct napi_struct *napi, *n; 1118 int ret; 1119 1120 schedule_timeout_uninterruptible(msecs_to_jiffies(100)); 1121 1122 if (wait_for_pending_requests(oct)) 1123 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1124 1125 if (lio_wait_for_instr_fetch(oct)) 1126 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1127 1128 if (octeon_set_io_queues_off(oct)) { 1129 dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n"); 1130 return -1; 1131 } 1132 1133 /* Disable the input and output queues now. No more packets will 1134 * arrive from Octeon. 1135 */ 1136 oct->fn_list.disable_io_queues(oct); 1137 /* Delete NAPI */ 1138 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1139 netif_napi_del(napi); 1140 1141 if (num_qs != oct->num_iqs) { 1142 ret = netif_set_real_num_rx_queues(netdev, num_qs); 1143 if (ret) { 1144 dev_err(&oct->pci_dev->dev, 1145 "Setting real number rx failed\n"); 1146 return ret; 1147 } 1148 1149 ret = netif_set_real_num_tx_queues(netdev, num_qs); 1150 if (ret) { 1151 dev_err(&oct->pci_dev->dev, 1152 "Setting real number tx failed\n"); 1153 return ret; 1154 } 1155 1156 /* The value of queue_count_update decides whether it is the 1157 * queue count or the descriptor count that is being 1158 * re-configured. 1159 */ 1160 queue_count_update = 1; 1161 } 1162 1163 /* Re-configuration of queues can happen in two scenarios, SRIOV enabled 1164 * and SRIOV disabled. Few things like recreating queue zero, resetting 1165 * glists and IRQs are required for both. For the latter, some more 1166 * steps like updating sriov_info for the octeon device need to be done. 1167 */ 1168 if (queue_count_update) { 1169 lio_delete_glists(lio); 1170 1171 /* Delete mbox for PF which is SRIOV disabled because sriov_info 1172 * will be now changed. 1173 */ 1174 if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled) 1175 oct->fn_list.free_mbox(oct); 1176 } 1177 1178 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1179 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1180 continue; 1181 octeon_delete_droq(oct, i); 1182 } 1183 1184 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1185 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1186 continue; 1187 octeon_delete_instr_queue(oct, i); 1188 } 1189 1190 if (queue_count_update) { 1191 /* For PF re-configure sriov related information */ 1192 if ((OCTEON_CN23XX_PF(oct)) && 1193 !oct->sriov_info.sriov_enabled) { 1194 oct->sriov_info.num_pf_rings = num_qs; 1195 if (cn23xx_sriov_config(oct)) { 1196 dev_err(&oct->pci_dev->dev, 1197 "Queue reset aborted: SRIOV config failed\n"); 1198 return -1; 1199 } 1200 1201 num_qs = oct->sriov_info.num_pf_rings; 1202 } 1203 } 1204 1205 if (oct->fn_list.setup_device_regs(oct)) { 1206 dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); 1207 return -1; 1208 } 1209 1210 /* The following are needed in case of queue count re-configuration and 1211 * not for descriptor count re-configuration. 1212 */ 1213 if (queue_count_update) { 1214 if (octeon_setup_instr_queues(oct)) 1215 return -1; 1216 1217 if (octeon_setup_output_queues(oct)) 1218 return -1; 1219 1220 /* Recreating mbox for PF that is SRIOV disabled */ 1221 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { 1222 if (oct->fn_list.setup_mbox(oct)) { 1223 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 1224 return -1; 1225 } 1226 } 1227 1228 /* Deleting and recreating IRQs whether the interface is SRIOV 1229 * enabled or disabled. 1230 */ 1231 if (lio_irq_reallocate_irqs(oct, num_qs)) { 1232 dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n"); 1233 return -1; 1234 } 1235 1236 /* Enable the input and output queues for this Octeon device */ 1237 if (oct->fn_list.enable_io_queues(oct)) { 1238 dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n"); 1239 return -1; 1240 } 1241 1242 for (i = 0; i < oct->num_oqs; i++) 1243 writel(oct->droq[i]->max_count, 1244 oct->droq[i]->pkts_credit_reg); 1245 1246 /* Informing firmware about the new queue count. It is required 1247 * for firmware to allocate more number of queues than those at 1248 * load time. 1249 */ 1250 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { 1251 if (lio_23xx_reconfigure_queue_count(lio)) 1252 return -1; 1253 } 1254 } 1255 1256 /* Once firmware is aware of the new value, queues can be recreated */ 1257 if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { 1258 dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n"); 1259 return -1; 1260 } 1261 1262 if (queue_count_update) { 1263 if (lio_setup_glists(oct, lio, num_qs)) { 1264 dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n"); 1265 return -1; 1266 } 1267 1268 /* Send firmware the information about new number of queues 1269 * if the interface is a VF or a PF that is SRIOV enabled. 1270 */ 1271 if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct)) 1272 if (lio_send_queue_count_update(netdev, num_qs)) 1273 return -1; 1274 } 1275 1276 return 0; 1277 } 1278 1279 static int lio_ethtool_set_ringparam(struct net_device *netdev, 1280 struct ethtool_ringparam *ering) 1281 { 1282 u32 rx_count, tx_count, rx_count_old, tx_count_old; 1283 struct lio *lio = GET_LIO(netdev); 1284 struct octeon_device *oct = lio->oct_dev; 1285 int stopped = 0; 1286 1287 if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct)) 1288 return -EINVAL; 1289 1290 if (ering->rx_mini_pending || ering->rx_jumbo_pending) 1291 return -EINVAL; 1292 1293 rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS, 1294 CN23XX_MAX_OQ_DESCRIPTORS); 1295 tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS, 1296 CN23XX_MAX_IQ_DESCRIPTORS); 1297 1298 rx_count_old = oct->droq[0]->max_count; 1299 tx_count_old = oct->instr_queue[0]->max_count; 1300 1301 if (rx_count == rx_count_old && tx_count == tx_count_old) 1302 return 0; 1303 1304 ifstate_set(lio, LIO_IFSTATE_RESETTING); 1305 1306 if (netif_running(netdev)) { 1307 netdev->netdev_ops->ndo_stop(netdev); 1308 stopped = 1; 1309 } 1310 1311 /* Change RX/TX DESCS count */ 1312 if (tx_count != tx_count_old) 1313 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1314 tx_count); 1315 if (rx_count != rx_count_old) 1316 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1317 rx_count); 1318 1319 if (lio_reset_queues(netdev, oct->num_iqs)) 1320 goto err_lio_reset_queues; 1321 1322 if (stopped) 1323 netdev->netdev_ops->ndo_open(netdev); 1324 1325 ifstate_reset(lio, LIO_IFSTATE_RESETTING); 1326 1327 return 0; 1328 1329 err_lio_reset_queues: 1330 if (tx_count != tx_count_old) 1331 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1332 tx_count_old); 1333 if (rx_count != rx_count_old) 1334 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1335 rx_count_old); 1336 return -EINVAL; 1337 } 1338 1339 static u32 lio_get_msglevel(struct net_device *netdev) 1340 { 1341 struct lio *lio = GET_LIO(netdev); 1342 1343 return lio->msg_enable; 1344 } 1345 1346 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl) 1347 { 1348 struct lio *lio = GET_LIO(netdev); 1349 1350 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { 1351 if (msglvl & NETIF_MSG_HW) 1352 liquidio_set_feature(netdev, 1353 OCTNET_CMD_VERBOSE_ENABLE, 0); 1354 else 1355 liquidio_set_feature(netdev, 1356 OCTNET_CMD_VERBOSE_DISABLE, 0); 1357 } 1358 1359 lio->msg_enable = msglvl; 1360 } 1361 1362 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl) 1363 { 1364 struct lio *lio = GET_LIO(netdev); 1365 1366 lio->msg_enable = msglvl; 1367 } 1368 1369 static void 1370 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 1371 { 1372 /* Notes: Not supporting any auto negotiation in these 1373 * drivers. Just report pause frame support. 1374 */ 1375 struct lio *lio = GET_LIO(netdev); 1376 struct octeon_device *oct = lio->oct_dev; 1377 1378 pause->autoneg = 0; 1379 1380 pause->tx_pause = oct->tx_pause; 1381 pause->rx_pause = oct->rx_pause; 1382 } 1383 1384 static int 1385 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 1386 { 1387 /* Notes: Not supporting any auto negotiation in these 1388 * drivers. 1389 */ 1390 struct lio *lio = GET_LIO(netdev); 1391 struct octeon_device *oct = lio->oct_dev; 1392 struct octnic_ctrl_pkt nctrl; 1393 struct oct_link_info *linfo = &lio->linfo; 1394 1395 int ret = 0; 1396 1397 if (oct->chip_id != OCTEON_CN23XX_PF_VID) 1398 return -EINVAL; 1399 1400 if (linfo->link.s.duplex == 0) { 1401 /*no flow control for half duplex*/ 1402 if (pause->rx_pause || pause->tx_pause) 1403 return -EINVAL; 1404 } 1405 1406 /*do not support autoneg of link flow control*/ 1407 if (pause->autoneg == AUTONEG_ENABLE) 1408 return -EINVAL; 1409 1410 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1411 1412 nctrl.ncmd.u64 = 0; 1413 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; 1414 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1415 nctrl.wait_time = 100; 1416 nctrl.netpndev = (u64)netdev; 1417 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1418 1419 if (pause->rx_pause) { 1420 /*enable rx pause*/ 1421 nctrl.ncmd.s.param1 = 1; 1422 } else { 1423 /*disable rx pause*/ 1424 nctrl.ncmd.s.param1 = 0; 1425 } 1426 1427 if (pause->tx_pause) { 1428 /*enable tx pause*/ 1429 nctrl.ncmd.s.param2 = 1; 1430 } else { 1431 /*disable tx pause*/ 1432 nctrl.ncmd.s.param2 = 0; 1433 } 1434 1435 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1436 if (ret < 0) { 1437 dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n"); 1438 return -EINVAL; 1439 } 1440 1441 oct->rx_pause = pause->rx_pause; 1442 oct->tx_pause = pause->tx_pause; 1443 1444 return 0; 1445 } 1446 1447 static void 1448 lio_get_ethtool_stats(struct net_device *netdev, 1449 struct ethtool_stats *stats __attribute__((unused)), 1450 u64 *data) 1451 { 1452 struct lio *lio = GET_LIO(netdev); 1453 struct octeon_device *oct_dev = lio->oct_dev; 1454 struct rtnl_link_stats64 lstats; 1455 int i = 0, j; 1456 1457 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1458 return; 1459 1460 netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); 1461 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1462 data[i++] = lstats.rx_packets; 1463 /*sum of oct->instr_queue[iq_no]->stats.tx_done */ 1464 data[i++] = lstats.tx_packets; 1465 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1466 data[i++] = lstats.rx_bytes; 1467 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1468 data[i++] = lstats.tx_bytes; 1469 data[i++] = lstats.rx_errors + 1470 oct_dev->link_stats.fromwire.fcs_err + 1471 oct_dev->link_stats.fromwire.jabber_err + 1472 oct_dev->link_stats.fromwire.l2_err + 1473 oct_dev->link_stats.fromwire.frame_err; 1474 data[i++] = lstats.tx_errors; 1475 /*sum of oct->droq[oq_no]->stats->rx_dropped + 1476 *oct->droq[oq_no]->stats->dropped_nodispatch + 1477 *oct->droq[oq_no]->stats->dropped_toomany + 1478 *oct->droq[oq_no]->stats->dropped_nomem 1479 */ 1480 data[i++] = lstats.rx_dropped + 1481 oct_dev->link_stats.fromwire.fifo_err + 1482 oct_dev->link_stats.fromwire.dmac_drop + 1483 oct_dev->link_stats.fromwire.red_drops + 1484 oct_dev->link_stats.fromwire.fw_err_pko + 1485 oct_dev->link_stats.fromwire.fw_err_link + 1486 oct_dev->link_stats.fromwire.fw_err_drop; 1487 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1488 data[i++] = lstats.tx_dropped + 1489 oct_dev->link_stats.fromhost.max_collision_fail + 1490 oct_dev->link_stats.fromhost.max_deferral_fail + 1491 oct_dev->link_stats.fromhost.total_collisions + 1492 oct_dev->link_stats.fromhost.fw_err_pko + 1493 oct_dev->link_stats.fromhost.fw_err_link + 1494 oct_dev->link_stats.fromhost.fw_err_drop + 1495 oct_dev->link_stats.fromhost.fw_err_pki; 1496 1497 /* firmware tx stats */ 1498 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 1499 *fromhost.fw_total_sent 1500 */ 1501 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); 1502 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ 1503 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); 1504 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ 1505 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); 1506 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */ 1507 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki); 1508 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ 1509 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); 1510 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1511 *fw_err_drop 1512 */ 1513 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); 1514 1515 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ 1516 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); 1517 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1518 *fw_tso_fwd 1519 */ 1520 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); 1521 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1522 *fw_err_tso 1523 */ 1524 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); 1525 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1526 *fw_tx_vxlan 1527 */ 1528 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); 1529 1530 /* Multicast packets sent by this port */ 1531 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; 1532 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; 1533 1534 /* mac tx statistics */ 1535 /*CVMX_BGXX_CMRX_TX_STAT5 */ 1536 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); 1537 /*CVMX_BGXX_CMRX_TX_STAT4 */ 1538 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); 1539 /*CVMX_BGXX_CMRX_TX_STAT15 */ 1540 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); 1541 /*CVMX_BGXX_CMRX_TX_STAT14 */ 1542 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); 1543 /*CVMX_BGXX_CMRX_TX_STAT17 */ 1544 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); 1545 /*CVMX_BGXX_CMRX_TX_STAT0 */ 1546 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); 1547 /*CVMX_BGXX_CMRX_TX_STAT3 */ 1548 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); 1549 /*CVMX_BGXX_CMRX_TX_STAT2 */ 1550 data[i++] = 1551 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); 1552 /*CVMX_BGXX_CMRX_TX_STAT0 */ 1553 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); 1554 /*CVMX_BGXX_CMRX_TX_STAT1 */ 1555 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); 1556 /*CVMX_BGXX_CMRX_TX_STAT16 */ 1557 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); 1558 /*CVMX_BGXX_CMRX_TX_STAT6 */ 1559 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); 1560 1561 /* RX firmware stats */ 1562 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1563 *fw_total_rcvd 1564 */ 1565 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); 1566 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1567 *fw_total_fwd 1568 */ 1569 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); 1570 /* Multicast packets received on this port */ 1571 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; 1572 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; 1573 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ 1574 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); 1575 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ 1576 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); 1577 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ 1578 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); 1579 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1580 *fw_err_pko 1581 */ 1582 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); 1583 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ 1584 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); 1585 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1586 *fromwire.fw_err_drop 1587 */ 1588 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); 1589 1590 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1591 *fromwire.fw_rx_vxlan 1592 */ 1593 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); 1594 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1595 *fromwire.fw_rx_vxlan_err 1596 */ 1597 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); 1598 1599 /* LRO */ 1600 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1601 *fw_lro_pkts 1602 */ 1603 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); 1604 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1605 *fw_lro_octs 1606 */ 1607 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); 1608 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ 1609 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); 1610 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 1611 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); 1612 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1613 *fw_lro_aborts_port 1614 */ 1615 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); 1616 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1617 *fw_lro_aborts_seq 1618 */ 1619 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); 1620 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1621 *fw_lro_aborts_tsval 1622 */ 1623 data[i++] = 1624 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); 1625 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1626 *fw_lro_aborts_timer 1627 */ 1628 /* intrmod: packet forward rate */ 1629 data[i++] = 1630 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); 1631 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 1632 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); 1633 1634 /* mac: link-level stats */ 1635 /*CVMX_BGXX_CMRX_RX_STAT0 */ 1636 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); 1637 /*CVMX_BGXX_CMRX_RX_STAT1 */ 1638 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); 1639 /*CVMX_PKI_STATX_STAT5 */ 1640 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); 1641 /*CVMX_PKI_STATX_STAT5 */ 1642 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); 1643 /*wqe->word2.err_code or wqe->word2.err_level */ 1644 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); 1645 /*CVMX_BGXX_CMRX_RX_STAT2 */ 1646 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); 1647 /*CVMX_BGXX_CMRX_RX_STAT6 */ 1648 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); 1649 /*CVMX_BGXX_CMRX_RX_STAT4 */ 1650 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); 1651 /*wqe->word2.err_code or wqe->word2.err_level */ 1652 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); 1653 /*lio->link_changes*/ 1654 data[i++] = CVM_CAST64(lio->link_changes); 1655 1656 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { 1657 if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) 1658 continue; 1659 /*packets to network port*/ 1660 /*# of packets tx to network */ 1661 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 1662 /*# of bytes tx to network */ 1663 data[i++] = 1664 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); 1665 /*# of packets dropped */ 1666 data[i++] = 1667 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); 1668 /*# of tx fails due to queue full */ 1669 data[i++] = 1670 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); 1671 /*XXX gather entries sent */ 1672 data[i++] = 1673 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); 1674 1675 /*instruction to firmware: data and control */ 1676 /*# of instructions to the queue */ 1677 data[i++] = 1678 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); 1679 /*# of instructions processed */ 1680 data[i++] = CVM_CAST64( 1681 oct_dev->instr_queue[j]->stats.instr_processed); 1682 /*# of instructions could not be processed */ 1683 data[i++] = CVM_CAST64( 1684 oct_dev->instr_queue[j]->stats.instr_dropped); 1685 /*bytes sent through the queue */ 1686 data[i++] = 1687 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); 1688 1689 /*tso request*/ 1690 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 1691 /*vxlan request*/ 1692 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1693 /*txq restart*/ 1694 data[i++] = 1695 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); 1696 } 1697 1698 /* RX */ 1699 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { 1700 if (!(oct_dev->io_qmask.oq & BIT_ULL(j))) 1701 continue; 1702 1703 /*packets send to TCP/IP network stack */ 1704 /*# of packets to network stack */ 1705 data[i++] = 1706 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); 1707 /*# of bytes to network stack */ 1708 data[i++] = 1709 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); 1710 /*# of packets dropped */ 1711 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1712 oct_dev->droq[j]->stats.dropped_toomany + 1713 oct_dev->droq[j]->stats.rx_dropped); 1714 data[i++] = 1715 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1716 data[i++] = 1717 CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1718 data[i++] = 1719 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1720 1721 /*control and data path*/ 1722 data[i++] = 1723 CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1724 data[i++] = 1725 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1726 data[i++] = 1727 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1728 1729 data[i++] = 1730 CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1731 data[i++] = 1732 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1733 } 1734 } 1735 1736 static void lio_vf_get_ethtool_stats(struct net_device *netdev, 1737 struct ethtool_stats *stats 1738 __attribute__((unused)), 1739 u64 *data) 1740 { 1741 struct rtnl_link_stats64 lstats; 1742 struct lio *lio = GET_LIO(netdev); 1743 struct octeon_device *oct_dev = lio->oct_dev; 1744 int i = 0, j, vj; 1745 1746 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1747 return; 1748 1749 netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); 1750 /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1751 data[i++] = lstats.rx_packets; 1752 /* sum of oct->instr_queue[iq_no]->stats.tx_done */ 1753 data[i++] = lstats.tx_packets; 1754 /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1755 data[i++] = lstats.rx_bytes; 1756 /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1757 data[i++] = lstats.tx_bytes; 1758 data[i++] = lstats.rx_errors; 1759 data[i++] = lstats.tx_errors; 1760 /* sum of oct->droq[oq_no]->stats->rx_dropped + 1761 * oct->droq[oq_no]->stats->dropped_nodispatch + 1762 * oct->droq[oq_no]->stats->dropped_toomany + 1763 * oct->droq[oq_no]->stats->dropped_nomem 1764 */ 1765 data[i++] = lstats.rx_dropped; 1766 /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1767 data[i++] = lstats.tx_dropped; 1768 1769 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; 1770 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; 1771 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; 1772 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; 1773 1774 /* lio->link_changes */ 1775 data[i++] = CVM_CAST64(lio->link_changes); 1776 1777 for (vj = 0; vj < oct_dev->num_iqs; vj++) { 1778 j = lio->linfo.txpciq[vj].s.q_no; 1779 1780 /* packets to network port */ 1781 /* # of packets tx to network */ 1782 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 1783 /* # of bytes tx to network */ 1784 data[i++] = CVM_CAST64( 1785 oct_dev->instr_queue[j]->stats.tx_tot_bytes); 1786 /* # of packets dropped */ 1787 data[i++] = CVM_CAST64( 1788 oct_dev->instr_queue[j]->stats.tx_dropped); 1789 /* # of tx fails due to queue full */ 1790 data[i++] = CVM_CAST64( 1791 oct_dev->instr_queue[j]->stats.tx_iq_busy); 1792 /* XXX gather entries sent */ 1793 data[i++] = CVM_CAST64( 1794 oct_dev->instr_queue[j]->stats.sgentry_sent); 1795 1796 /* instruction to firmware: data and control */ 1797 /* # of instructions to the queue */ 1798 data[i++] = CVM_CAST64( 1799 oct_dev->instr_queue[j]->stats.instr_posted); 1800 /* # of instructions processed */ 1801 data[i++] = 1802 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed); 1803 /* # of instructions could not be processed */ 1804 data[i++] = 1805 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped); 1806 /* bytes sent through the queue */ 1807 data[i++] = CVM_CAST64( 1808 oct_dev->instr_queue[j]->stats.bytes_sent); 1809 /* tso request */ 1810 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 1811 /* vxlan request */ 1812 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1813 /* txq restart */ 1814 data[i++] = CVM_CAST64( 1815 oct_dev->instr_queue[j]->stats.tx_restart); 1816 } 1817 1818 /* RX */ 1819 for (vj = 0; vj < oct_dev->num_oqs; vj++) { 1820 j = lio->linfo.rxpciq[vj].s.q_no; 1821 1822 /* packets send to TCP/IP network stack */ 1823 /* # of packets to network stack */ 1824 data[i++] = CVM_CAST64( 1825 oct_dev->droq[j]->stats.rx_pkts_received); 1826 /* # of bytes to network stack */ 1827 data[i++] = CVM_CAST64( 1828 oct_dev->droq[j]->stats.rx_bytes_received); 1829 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1830 oct_dev->droq[j]->stats.dropped_toomany + 1831 oct_dev->droq[j]->stats.rx_dropped); 1832 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1833 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1834 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1835 1836 /* control and data path */ 1837 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1838 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1839 data[i++] = 1840 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1841 1842 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1843 data[i++] = 1844 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1845 } 1846 } 1847 1848 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data) 1849 { 1850 struct octeon_device *oct_dev = lio->oct_dev; 1851 int i; 1852 1853 switch (oct_dev->chip_id) { 1854 case OCTEON_CN23XX_PF_VID: 1855 case OCTEON_CN23XX_VF_VID: 1856 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) { 1857 sprintf(data, "%s", oct_priv_flags_strings[i]); 1858 data += ETH_GSTRING_LEN; 1859 } 1860 break; 1861 case OCTEON_CN68XX: 1862 case OCTEON_CN66XX: 1863 break; 1864 default: 1865 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1866 break; 1867 } 1868 } 1869 1870 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 1871 { 1872 struct lio *lio = GET_LIO(netdev); 1873 struct octeon_device *oct_dev = lio->oct_dev; 1874 int num_iq_stats, num_oq_stats, i, j; 1875 int num_stats; 1876 1877 switch (stringset) { 1878 case ETH_SS_STATS: 1879 num_stats = ARRAY_SIZE(oct_stats_strings); 1880 for (j = 0; j < num_stats; j++) { 1881 sprintf(data, "%s", oct_stats_strings[j]); 1882 data += ETH_GSTRING_LEN; 1883 } 1884 1885 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1886 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1887 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1888 continue; 1889 for (j = 0; j < num_iq_stats; j++) { 1890 sprintf(data, "tx-%d-%s", i, 1891 oct_iq_stats_strings[j]); 1892 data += ETH_GSTRING_LEN; 1893 } 1894 } 1895 1896 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1897 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1898 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1899 continue; 1900 for (j = 0; j < num_oq_stats; j++) { 1901 sprintf(data, "rx-%d-%s", i, 1902 oct_droq_stats_strings[j]); 1903 data += ETH_GSTRING_LEN; 1904 } 1905 } 1906 break; 1907 1908 case ETH_SS_PRIV_FLAGS: 1909 lio_get_priv_flags_strings(lio, data); 1910 break; 1911 default: 1912 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1913 break; 1914 } 1915 } 1916 1917 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset, 1918 u8 *data) 1919 { 1920 int num_iq_stats, num_oq_stats, i, j; 1921 struct lio *lio = GET_LIO(netdev); 1922 struct octeon_device *oct_dev = lio->oct_dev; 1923 int num_stats; 1924 1925 switch (stringset) { 1926 case ETH_SS_STATS: 1927 num_stats = ARRAY_SIZE(oct_vf_stats_strings); 1928 for (j = 0; j < num_stats; j++) { 1929 sprintf(data, "%s", oct_vf_stats_strings[j]); 1930 data += ETH_GSTRING_LEN; 1931 } 1932 1933 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1934 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1935 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1936 continue; 1937 for (j = 0; j < num_iq_stats; j++) { 1938 sprintf(data, "tx-%d-%s", i, 1939 oct_iq_stats_strings[j]); 1940 data += ETH_GSTRING_LEN; 1941 } 1942 } 1943 1944 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1945 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1946 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1947 continue; 1948 for (j = 0; j < num_oq_stats; j++) { 1949 sprintf(data, "rx-%d-%s", i, 1950 oct_droq_stats_strings[j]); 1951 data += ETH_GSTRING_LEN; 1952 } 1953 } 1954 break; 1955 1956 case ETH_SS_PRIV_FLAGS: 1957 lio_get_priv_flags_strings(lio, data); 1958 break; 1959 default: 1960 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1961 break; 1962 } 1963 } 1964 1965 static int lio_get_priv_flags_ss_count(struct lio *lio) 1966 { 1967 struct octeon_device *oct_dev = lio->oct_dev; 1968 1969 switch (oct_dev->chip_id) { 1970 case OCTEON_CN23XX_PF_VID: 1971 case OCTEON_CN23XX_VF_VID: 1972 return ARRAY_SIZE(oct_priv_flags_strings); 1973 case OCTEON_CN68XX: 1974 case OCTEON_CN66XX: 1975 return -EOPNOTSUPP; 1976 default: 1977 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1978 return -EOPNOTSUPP; 1979 } 1980 } 1981 1982 static int lio_get_sset_count(struct net_device *netdev, int sset) 1983 { 1984 struct lio *lio = GET_LIO(netdev); 1985 struct octeon_device *oct_dev = lio->oct_dev; 1986 1987 switch (sset) { 1988 case ETH_SS_STATS: 1989 return (ARRAY_SIZE(oct_stats_strings) + 1990 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + 1991 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 1992 case ETH_SS_PRIV_FLAGS: 1993 return lio_get_priv_flags_ss_count(lio); 1994 default: 1995 return -EOPNOTSUPP; 1996 } 1997 } 1998 1999 static int lio_vf_get_sset_count(struct net_device *netdev, int sset) 2000 { 2001 struct lio *lio = GET_LIO(netdev); 2002 struct octeon_device *oct_dev = lio->oct_dev; 2003 2004 switch (sset) { 2005 case ETH_SS_STATS: 2006 return (ARRAY_SIZE(oct_vf_stats_strings) + 2007 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + 2008 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 2009 case ETH_SS_PRIV_FLAGS: 2010 return lio_get_priv_flags_ss_count(lio); 2011 default: 2012 return -EOPNOTSUPP; 2013 } 2014 } 2015 2016 /* Callback function for intrmod */ 2017 static void octnet_intrmod_callback(struct octeon_device *oct_dev, 2018 u32 status, 2019 void *ptr) 2020 { 2021 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; 2022 struct oct_intrmod_context *ctx; 2023 2024 ctx = (struct oct_intrmod_context *)sc->ctxptr; 2025 2026 ctx->status = status; 2027 2028 WRITE_ONCE(ctx->cond, 1); 2029 2030 /* This barrier is required to be sure that the response has been 2031 * written fully before waking up the handler 2032 */ 2033 wmb(); 2034 2035 wake_up_interruptible(&ctx->wc); 2036 } 2037 2038 /* get interrupt moderation parameters */ 2039 static int octnet_get_intrmod_cfg(struct lio *lio, 2040 struct oct_intrmod_cfg *intr_cfg) 2041 { 2042 struct octeon_soft_command *sc; 2043 struct oct_intrmod_context *ctx; 2044 struct oct_intrmod_resp *resp; 2045 int retval; 2046 struct octeon_device *oct_dev = lio->oct_dev; 2047 2048 /* Alloc soft command */ 2049 sc = (struct octeon_soft_command *) 2050 octeon_alloc_soft_command(oct_dev, 2051 0, 2052 sizeof(struct oct_intrmod_resp), 2053 sizeof(struct oct_intrmod_context)); 2054 2055 if (!sc) 2056 return -ENOMEM; 2057 2058 resp = (struct oct_intrmod_resp *)sc->virtrptr; 2059 memset(resp, 0, sizeof(struct oct_intrmod_resp)); 2060 2061 ctx = (struct oct_intrmod_context *)sc->ctxptr; 2062 memset(ctx, 0, sizeof(struct oct_intrmod_context)); 2063 WRITE_ONCE(ctx->cond, 0); 2064 ctx->octeon_id = lio_get_device_id(oct_dev); 2065 init_waitqueue_head(&ctx->wc); 2066 2067 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 2068 2069 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 2070 OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); 2071 2072 sc->callback = octnet_intrmod_callback; 2073 sc->callback_arg = sc; 2074 sc->wait_time = 1000; 2075 2076 retval = octeon_send_soft_command(oct_dev, sc); 2077 if (retval == IQ_SEND_FAILED) { 2078 octeon_free_soft_command(oct_dev, sc); 2079 return -EINVAL; 2080 } 2081 2082 /* Sleep on a wait queue till the cond flag indicates that the 2083 * response arrived or timed-out. 2084 */ 2085 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 2086 dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n"); 2087 goto intrmod_info_wait_intr; 2088 } 2089 2090 retval = ctx->status || resp->status; 2091 if (retval) { 2092 dev_err(&oct_dev->pci_dev->dev, 2093 "Get interrupt moderation parameters failed\n"); 2094 goto intrmod_info_wait_fail; 2095 } 2096 2097 octeon_swap_8B_data((u64 *)&resp->intrmod, 2098 (sizeof(struct oct_intrmod_cfg)) / 8); 2099 memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); 2100 octeon_free_soft_command(oct_dev, sc); 2101 2102 return 0; 2103 2104 intrmod_info_wait_fail: 2105 2106 octeon_free_soft_command(oct_dev, sc); 2107 2108 intrmod_info_wait_intr: 2109 2110 return -ENODEV; 2111 } 2112 2113 /* Configure interrupt moderation parameters */ 2114 static int octnet_set_intrmod_cfg(struct lio *lio, 2115 struct oct_intrmod_cfg *intr_cfg) 2116 { 2117 struct octeon_soft_command *sc; 2118 struct oct_intrmod_context *ctx; 2119 struct oct_intrmod_cfg *cfg; 2120 int retval; 2121 struct octeon_device *oct_dev = lio->oct_dev; 2122 2123 /* Alloc soft command */ 2124 sc = (struct octeon_soft_command *) 2125 octeon_alloc_soft_command(oct_dev, 2126 sizeof(struct oct_intrmod_cfg), 2127 0, 2128 sizeof(struct oct_intrmod_context)); 2129 2130 if (!sc) 2131 return -ENOMEM; 2132 2133 ctx = (struct oct_intrmod_context *)sc->ctxptr; 2134 2135 WRITE_ONCE(ctx->cond, 0); 2136 ctx->octeon_id = lio_get_device_id(oct_dev); 2137 init_waitqueue_head(&ctx->wc); 2138 2139 cfg = (struct oct_intrmod_cfg *)sc->virtdptr; 2140 2141 memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); 2142 octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8); 2143 2144 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 2145 2146 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 2147 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); 2148 2149 sc->callback = octnet_intrmod_callback; 2150 sc->callback_arg = sc; 2151 sc->wait_time = 1000; 2152 2153 retval = octeon_send_soft_command(oct_dev, sc); 2154 if (retval == IQ_SEND_FAILED) { 2155 octeon_free_soft_command(oct_dev, sc); 2156 return -EINVAL; 2157 } 2158 2159 /* Sleep on a wait queue till the cond flag indicates that the 2160 * response arrived or timed-out. 2161 */ 2162 if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) { 2163 retval = ctx->status; 2164 if (retval) 2165 dev_err(&oct_dev->pci_dev->dev, 2166 "intrmod config failed. Status: %llx\n", 2167 CVM_CAST64(retval)); 2168 else 2169 dev_info(&oct_dev->pci_dev->dev, 2170 "Rx-Adaptive Interrupt moderation %s\n", 2171 (intr_cfg->rx_enable) ? 2172 "enabled" : "disabled"); 2173 2174 octeon_free_soft_command(oct_dev, sc); 2175 2176 return ((retval) ? -ENODEV : 0); 2177 } 2178 2179 dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n"); 2180 2181 return -EINTR; 2182 } 2183 2184 static int lio_get_intr_coalesce(struct net_device *netdev, 2185 struct ethtool_coalesce *intr_coal) 2186 { 2187 struct lio *lio = GET_LIO(netdev); 2188 struct octeon_device *oct = lio->oct_dev; 2189 struct octeon_instr_queue *iq; 2190 struct oct_intrmod_cfg intrmod_cfg; 2191 2192 if (octnet_get_intrmod_cfg(lio, &intrmod_cfg)) 2193 return -ENODEV; 2194 2195 switch (oct->chip_id) { 2196 case OCTEON_CN23XX_PF_VID: 2197 case OCTEON_CN23XX_VF_VID: { 2198 if (!intrmod_cfg.rx_enable) { 2199 intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs; 2200 intr_coal->rx_max_coalesced_frames = 2201 oct->rx_max_coalesced_frames; 2202 } 2203 if (!intrmod_cfg.tx_enable) 2204 intr_coal->tx_max_coalesced_frames = 2205 oct->tx_max_coalesced_frames; 2206 break; 2207 } 2208 case OCTEON_CN68XX: 2209 case OCTEON_CN66XX: { 2210 struct octeon_cn6xxx *cn6xxx = 2211 (struct octeon_cn6xxx *)oct->chip; 2212 2213 if (!intrmod_cfg.rx_enable) { 2214 intr_coal->rx_coalesce_usecs = 2215 CFG_GET_OQ_INTR_TIME(cn6xxx->conf); 2216 intr_coal->rx_max_coalesced_frames = 2217 CFG_GET_OQ_INTR_PKT(cn6xxx->conf); 2218 } 2219 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; 2220 intr_coal->tx_max_coalesced_frames = iq->fill_threshold; 2221 break; 2222 } 2223 default: 2224 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 2225 return -EINVAL; 2226 } 2227 if (intrmod_cfg.rx_enable) { 2228 intr_coal->use_adaptive_rx_coalesce = 2229 intrmod_cfg.rx_enable; 2230 intr_coal->rate_sample_interval = 2231 intrmod_cfg.check_intrvl; 2232 intr_coal->pkt_rate_high = 2233 intrmod_cfg.maxpkt_ratethr; 2234 intr_coal->pkt_rate_low = 2235 intrmod_cfg.minpkt_ratethr; 2236 intr_coal->rx_max_coalesced_frames_high = 2237 intrmod_cfg.rx_maxcnt_trigger; 2238 intr_coal->rx_coalesce_usecs_high = 2239 intrmod_cfg.rx_maxtmr_trigger; 2240 intr_coal->rx_coalesce_usecs_low = 2241 intrmod_cfg.rx_mintmr_trigger; 2242 intr_coal->rx_max_coalesced_frames_low = 2243 intrmod_cfg.rx_mincnt_trigger; 2244 } 2245 if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && 2246 (intrmod_cfg.tx_enable)) { 2247 intr_coal->use_adaptive_tx_coalesce = 2248 intrmod_cfg.tx_enable; 2249 intr_coal->tx_max_coalesced_frames_high = 2250 intrmod_cfg.tx_maxcnt_trigger; 2251 intr_coal->tx_max_coalesced_frames_low = 2252 intrmod_cfg.tx_mincnt_trigger; 2253 } 2254 return 0; 2255 } 2256 2257 /* Enable/Disable auto interrupt Moderation */ 2258 static int oct_cfg_adaptive_intr(struct lio *lio, 2259 struct oct_intrmod_cfg *intrmod_cfg, 2260 struct ethtool_coalesce *intr_coal) 2261 { 2262 int ret = 0; 2263 2264 if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) { 2265 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; 2266 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; 2267 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; 2268 } 2269 if (intrmod_cfg->rx_enable) { 2270 intrmod_cfg->rx_maxcnt_trigger = 2271 intr_coal->rx_max_coalesced_frames_high; 2272 intrmod_cfg->rx_maxtmr_trigger = 2273 intr_coal->rx_coalesce_usecs_high; 2274 intrmod_cfg->rx_mintmr_trigger = 2275 intr_coal->rx_coalesce_usecs_low; 2276 intrmod_cfg->rx_mincnt_trigger = 2277 intr_coal->rx_max_coalesced_frames_low; 2278 } 2279 if (intrmod_cfg->tx_enable) { 2280 intrmod_cfg->tx_maxcnt_trigger = 2281 intr_coal->tx_max_coalesced_frames_high; 2282 intrmod_cfg->tx_mincnt_trigger = 2283 intr_coal->tx_max_coalesced_frames_low; 2284 } 2285 2286 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); 2287 2288 return ret; 2289 } 2290 2291 static int 2292 oct_cfg_rx_intrcnt(struct lio *lio, 2293 struct oct_intrmod_cfg *intrmod, 2294 struct ethtool_coalesce *intr_coal) 2295 { 2296 struct octeon_device *oct = lio->oct_dev; 2297 u32 rx_max_coalesced_frames; 2298 2299 /* Config Cnt based interrupt values */ 2300 switch (oct->chip_id) { 2301 case OCTEON_CN68XX: 2302 case OCTEON_CN66XX: { 2303 struct octeon_cn6xxx *cn6xxx = 2304 (struct octeon_cn6xxx *)oct->chip; 2305 2306 if (!intr_coal->rx_max_coalesced_frames) 2307 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; 2308 else 2309 rx_max_coalesced_frames = 2310 intr_coal->rx_max_coalesced_frames; 2311 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, 2312 rx_max_coalesced_frames); 2313 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); 2314 break; 2315 } 2316 case OCTEON_CN23XX_PF_VID: { 2317 int q_no; 2318 2319 if (!intr_coal->rx_max_coalesced_frames) 2320 rx_max_coalesced_frames = intrmod->rx_frames; 2321 else 2322 rx_max_coalesced_frames = 2323 intr_coal->rx_max_coalesced_frames; 2324 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2325 q_no += oct->sriov_info.pf_srn; 2326 octeon_write_csr64( 2327 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 2328 (octeon_read_csr64( 2329 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2330 (0x3fffff00000000UL)) | 2331 (rx_max_coalesced_frames - 1)); 2332 /*consider setting resend bit*/ 2333 } 2334 intrmod->rx_frames = rx_max_coalesced_frames; 2335 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2336 break; 2337 } 2338 case OCTEON_CN23XX_VF_VID: { 2339 int q_no; 2340 2341 if (!intr_coal->rx_max_coalesced_frames) 2342 rx_max_coalesced_frames = intrmod->rx_frames; 2343 else 2344 rx_max_coalesced_frames = 2345 intr_coal->rx_max_coalesced_frames; 2346 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2347 octeon_write_csr64( 2348 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 2349 (octeon_read_csr64( 2350 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2351 (0x3fffff00000000UL)) | 2352 (rx_max_coalesced_frames - 1)); 2353 /*consider writing to resend bit here*/ 2354 } 2355 intrmod->rx_frames = rx_max_coalesced_frames; 2356 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2357 break; 2358 } 2359 default: 2360 return -EINVAL; 2361 } 2362 return 0; 2363 } 2364 2365 static int oct_cfg_rx_intrtime(struct lio *lio, 2366 struct oct_intrmod_cfg *intrmod, 2367 struct ethtool_coalesce *intr_coal) 2368 { 2369 struct octeon_device *oct = lio->oct_dev; 2370 u32 time_threshold, rx_coalesce_usecs; 2371 2372 /* Config Time based interrupt values */ 2373 switch (oct->chip_id) { 2374 case OCTEON_CN68XX: 2375 case OCTEON_CN66XX: { 2376 struct octeon_cn6xxx *cn6xxx = 2377 (struct octeon_cn6xxx *)oct->chip; 2378 if (!intr_coal->rx_coalesce_usecs) 2379 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; 2380 else 2381 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2382 2383 time_threshold = lio_cn6xxx_get_oq_ticks(oct, 2384 rx_coalesce_usecs); 2385 octeon_write_csr(oct, 2386 CN6XXX_SLI_OQ_INT_LEVEL_TIME, 2387 time_threshold); 2388 2389 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); 2390 break; 2391 } 2392 case OCTEON_CN23XX_PF_VID: { 2393 u64 time_threshold; 2394 int q_no; 2395 2396 if (!intr_coal->rx_coalesce_usecs) 2397 rx_coalesce_usecs = intrmod->rx_usecs; 2398 else 2399 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2400 time_threshold = 2401 cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 2402 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2403 q_no += oct->sriov_info.pf_srn; 2404 octeon_write_csr64(oct, 2405 CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 2406 (intrmod->rx_frames | 2407 ((u64)time_threshold << 32))); 2408 /*consider writing to resend bit here*/ 2409 } 2410 intrmod->rx_usecs = rx_coalesce_usecs; 2411 oct->rx_coalesce_usecs = rx_coalesce_usecs; 2412 break; 2413 } 2414 case OCTEON_CN23XX_VF_VID: { 2415 u64 time_threshold; 2416 int q_no; 2417 2418 if (!intr_coal->rx_coalesce_usecs) 2419 rx_coalesce_usecs = intrmod->rx_usecs; 2420 else 2421 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2422 2423 time_threshold = 2424 cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 2425 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2426 octeon_write_csr64( 2427 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 2428 (intrmod->rx_frames | 2429 ((u64)time_threshold << 32))); 2430 /*consider setting resend bit*/ 2431 } 2432 intrmod->rx_usecs = rx_coalesce_usecs; 2433 oct->rx_coalesce_usecs = rx_coalesce_usecs; 2434 break; 2435 } 2436 default: 2437 return -EINVAL; 2438 } 2439 2440 return 0; 2441 } 2442 2443 static int 2444 oct_cfg_tx_intrcnt(struct lio *lio, 2445 struct oct_intrmod_cfg *intrmod, 2446 struct ethtool_coalesce *intr_coal) 2447 { 2448 struct octeon_device *oct = lio->oct_dev; 2449 u32 iq_intr_pkt; 2450 void __iomem *inst_cnt_reg; 2451 u64 val; 2452 2453 /* Config Cnt based interrupt values */ 2454 switch (oct->chip_id) { 2455 case OCTEON_CN68XX: 2456 case OCTEON_CN66XX: 2457 break; 2458 case OCTEON_CN23XX_VF_VID: 2459 case OCTEON_CN23XX_PF_VID: { 2460 int q_no; 2461 2462 if (!intr_coal->tx_max_coalesced_frames) 2463 iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD & 2464 CN23XX_PKT_IN_DONE_WMARK_MASK; 2465 else 2466 iq_intr_pkt = intr_coal->tx_max_coalesced_frames & 2467 CN23XX_PKT_IN_DONE_WMARK_MASK; 2468 for (q_no = 0; q_no < oct->num_iqs; q_no++) { 2469 inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; 2470 val = readq(inst_cnt_reg); 2471 /*clear wmark and count.dont want to write count back*/ 2472 val = (val & 0xFFFF000000000000ULL) | 2473 ((u64)(iq_intr_pkt - 1) 2474 << CN23XX_PKT_IN_DONE_WMARK_BIT_POS); 2475 writeq(val, inst_cnt_reg); 2476 /*consider setting resend bit*/ 2477 } 2478 intrmod->tx_frames = iq_intr_pkt; 2479 oct->tx_max_coalesced_frames = iq_intr_pkt; 2480 break; 2481 } 2482 default: 2483 return -EINVAL; 2484 } 2485 return 0; 2486 } 2487 2488 static int lio_set_intr_coalesce(struct net_device *netdev, 2489 struct ethtool_coalesce *intr_coal) 2490 { 2491 struct lio *lio = GET_LIO(netdev); 2492 int ret; 2493 struct octeon_device *oct = lio->oct_dev; 2494 struct oct_intrmod_cfg intrmod = {0}; 2495 u32 j, q_no; 2496 int db_max, db_min; 2497 2498 switch (oct->chip_id) { 2499 case OCTEON_CN68XX: 2500 case OCTEON_CN66XX: 2501 db_min = CN6XXX_DB_MIN; 2502 db_max = CN6XXX_DB_MAX; 2503 if ((intr_coal->tx_max_coalesced_frames >= db_min) && 2504 (intr_coal->tx_max_coalesced_frames <= db_max)) { 2505 for (j = 0; j < lio->linfo.num_txpciq; j++) { 2506 q_no = lio->linfo.txpciq[j].s.q_no; 2507 oct->instr_queue[q_no]->fill_threshold = 2508 intr_coal->tx_max_coalesced_frames; 2509 } 2510 } else { 2511 dev_err(&oct->pci_dev->dev, 2512 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", 2513 intr_coal->tx_max_coalesced_frames, 2514 db_min, db_max); 2515 return -EINVAL; 2516 } 2517 break; 2518 case OCTEON_CN23XX_PF_VID: 2519 case OCTEON_CN23XX_VF_VID: 2520 break; 2521 default: 2522 return -EINVAL; 2523 } 2524 2525 intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; 2526 intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0; 2527 intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2528 intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2529 intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2530 2531 ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal); 2532 2533 if (!intr_coal->use_adaptive_rx_coalesce) { 2534 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal); 2535 if (ret) 2536 goto ret_intrmod; 2537 2538 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal); 2539 if (ret) 2540 goto ret_intrmod; 2541 } else { 2542 oct->rx_coalesce_usecs = 2543 CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2544 oct->rx_max_coalesced_frames = 2545 CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2546 } 2547 2548 if (!intr_coal->use_adaptive_tx_coalesce) { 2549 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal); 2550 if (ret) 2551 goto ret_intrmod; 2552 } else { 2553 oct->tx_max_coalesced_frames = 2554 CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2555 } 2556 2557 return 0; 2558 ret_intrmod: 2559 return ret; 2560 } 2561 2562 static int lio_get_ts_info(struct net_device *netdev, 2563 struct ethtool_ts_info *info) 2564 { 2565 struct lio *lio = GET_LIO(netdev); 2566 2567 info->so_timestamping = 2568 #ifdef PTP_HARDWARE_TIMESTAMPING 2569 SOF_TIMESTAMPING_TX_HARDWARE | 2570 SOF_TIMESTAMPING_RX_HARDWARE | 2571 SOF_TIMESTAMPING_RAW_HARDWARE | 2572 SOF_TIMESTAMPING_TX_SOFTWARE | 2573 #endif 2574 SOF_TIMESTAMPING_RX_SOFTWARE | 2575 SOF_TIMESTAMPING_SOFTWARE; 2576 2577 if (lio->ptp_clock) 2578 info->phc_index = ptp_clock_index(lio->ptp_clock); 2579 else 2580 info->phc_index = -1; 2581 2582 #ifdef PTP_HARDWARE_TIMESTAMPING 2583 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 2584 2585 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2586 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 2587 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 2588 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 2589 #endif 2590 2591 return 0; 2592 } 2593 2594 /* Return register dump len. */ 2595 static int lio_get_regs_len(struct net_device *dev) 2596 { 2597 struct lio *lio = GET_LIO(dev); 2598 struct octeon_device *oct = lio->oct_dev; 2599 2600 switch (oct->chip_id) { 2601 case OCTEON_CN23XX_PF_VID: 2602 return OCT_ETHTOOL_REGDUMP_LEN_23XX; 2603 case OCTEON_CN23XX_VF_VID: 2604 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF; 2605 default: 2606 return OCT_ETHTOOL_REGDUMP_LEN; 2607 } 2608 } 2609 2610 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct) 2611 { 2612 u32 reg; 2613 u8 pf_num = oct->pf_num; 2614 int len = 0; 2615 int i; 2616 2617 /* PCI Window Registers */ 2618 2619 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2620 2621 /*0x29030 or 0x29040*/ 2622 reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); 2623 len += sprintf(s + len, 2624 "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", 2625 reg, oct->pcie_port, oct->pf_num, 2626 (u64)octeon_read_csr64(oct, reg)); 2627 2628 /*0x27080 or 0x27090*/ 2629 reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); 2630 len += 2631 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", 2632 reg, oct->pcie_port, oct->pf_num, 2633 (u64)octeon_read_csr64(oct, reg)); 2634 2635 /*0x27000 or 0x27010*/ 2636 reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); 2637 len += 2638 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", 2639 reg, oct->pcie_port, oct->pf_num, 2640 (u64)octeon_read_csr64(oct, reg)); 2641 2642 /*0x29120*/ 2643 reg = 0x29120; 2644 len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, 2645 (u64)octeon_read_csr64(oct, reg)); 2646 2647 /*0x27300*/ 2648 reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2649 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2650 len += sprintf( 2651 s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, 2652 oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg)); 2653 2654 /*0x27200*/ 2655 reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2656 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2657 len += sprintf(s + len, 2658 "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", 2659 reg, oct->pcie_port, oct->pf_num, 2660 (u64)octeon_read_csr64(oct, reg)); 2661 2662 /*29130*/ 2663 reg = CN23XX_SLI_PKT_CNT_INT; 2664 len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, 2665 (u64)octeon_read_csr64(oct, reg)); 2666 2667 /*0x29140*/ 2668 reg = CN23XX_SLI_PKT_TIME_INT; 2669 len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, 2670 (u64)octeon_read_csr64(oct, reg)); 2671 2672 /*0x29160*/ 2673 reg = 0x29160; 2674 len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg, 2675 (u64)octeon_read_csr64(oct, reg)); 2676 2677 /*0x29180*/ 2678 reg = CN23XX_SLI_OQ_WMARK; 2679 len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", 2680 reg, (u64)octeon_read_csr64(oct, reg)); 2681 2682 /*0x291E0*/ 2683 reg = CN23XX_SLI_PKT_IOQ_RING_RST; 2684 len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, 2685 (u64)octeon_read_csr64(oct, reg)); 2686 2687 /*0x29210*/ 2688 reg = CN23XX_SLI_GBL_CONTROL; 2689 len += sprintf(s + len, 2690 "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, 2691 (u64)octeon_read_csr64(oct, reg)); 2692 2693 /*0x29220*/ 2694 reg = 0x29220; 2695 len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", 2696 reg, (u64)octeon_read_csr64(oct, reg)); 2697 2698 /*PF only*/ 2699 if (pf_num == 0) { 2700 /*0x29260*/ 2701 reg = CN23XX_SLI_OUT_BP_EN_W1S; 2702 len += sprintf(s + len, 2703 "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n", 2704 reg, (u64)octeon_read_csr64(oct, reg)); 2705 } else if (pf_num == 1) { 2706 /*0x29270*/ 2707 reg = CN23XX_SLI_OUT_BP_EN2_W1S; 2708 len += sprintf(s + len, 2709 "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", 2710 reg, (u64)octeon_read_csr64(oct, reg)); 2711 } 2712 2713 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2714 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); 2715 len += 2716 sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2717 reg, i, (u64)octeon_read_csr64(oct, reg)); 2718 } 2719 2720 /*0x10040*/ 2721 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2722 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2723 len += sprintf(s + len, 2724 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2725 reg, i, (u64)octeon_read_csr64(oct, reg)); 2726 } 2727 2728 /*0x10080*/ 2729 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2730 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i); 2731 len += sprintf(s + len, 2732 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2733 reg, i, (u64)octeon_read_csr64(oct, reg)); 2734 } 2735 2736 /*0x10090*/ 2737 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2738 reg = CN23XX_SLI_OQ_SIZE(i); 2739 len += sprintf( 2740 s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2741 reg, i, (u64)octeon_read_csr64(oct, reg)); 2742 } 2743 2744 /*0x10050*/ 2745 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2746 reg = CN23XX_SLI_OQ_PKT_CONTROL(i); 2747 len += sprintf( 2748 s + len, 2749 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2750 reg, i, (u64)octeon_read_csr64(oct, reg)); 2751 } 2752 2753 /*0x10070*/ 2754 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2755 reg = CN23XX_SLI_OQ_BASE_ADDR64(i); 2756 len += sprintf(s + len, 2757 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2758 reg, i, (u64)octeon_read_csr64(oct, reg)); 2759 } 2760 2761 /*0x100a0*/ 2762 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2763 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i); 2764 len += sprintf(s + len, 2765 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2766 reg, i, (u64)octeon_read_csr64(oct, reg)); 2767 } 2768 2769 /*0x100b0*/ 2770 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2771 reg = CN23XX_SLI_OQ_PKTS_SENT(i); 2772 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2773 reg, i, (u64)octeon_read_csr64(oct, reg)); 2774 } 2775 2776 /*0x100c0*/ 2777 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2778 reg = 0x100c0 + i * CN23XX_OQ_OFFSET; 2779 len += sprintf(s + len, 2780 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2781 reg, i, (u64)octeon_read_csr64(oct, reg)); 2782 2783 /*0x10000*/ 2784 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2785 reg = CN23XX_SLI_IQ_PKT_CONTROL64(i); 2786 len += sprintf( 2787 s + len, 2788 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2789 reg, i, (u64)octeon_read_csr64(oct, reg)); 2790 } 2791 2792 /*0x10010*/ 2793 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2794 reg = CN23XX_SLI_IQ_BASE_ADDR64(i); 2795 len += sprintf( 2796 s + len, 2797 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg, 2798 i, (u64)octeon_read_csr64(oct, reg)); 2799 } 2800 2801 /*0x10020*/ 2802 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2803 reg = CN23XX_SLI_IQ_DOORBELL(i); 2804 len += sprintf( 2805 s + len, 2806 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2807 reg, i, (u64)octeon_read_csr64(oct, reg)); 2808 } 2809 2810 /*0x10030*/ 2811 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2812 reg = CN23XX_SLI_IQ_SIZE(i); 2813 len += sprintf( 2814 s + len, 2815 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2816 reg, i, (u64)octeon_read_csr64(oct, reg)); 2817 } 2818 2819 /*0x10040*/ 2820 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) 2821 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2822 len += sprintf(s + len, 2823 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2824 reg, i, (u64)octeon_read_csr64(oct, reg)); 2825 } 2826 2827 return len; 2828 } 2829 2830 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct) 2831 { 2832 int len = 0; 2833 u32 reg; 2834 int i; 2835 2836 /* PCI Window Registers */ 2837 2838 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2839 2840 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2841 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i); 2842 len += sprintf(s + len, 2843 "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2844 reg, i, (u64)octeon_read_csr64(oct, reg)); 2845 } 2846 2847 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2848 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2849 len += sprintf(s + len, 2850 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2851 reg, i, (u64)octeon_read_csr64(oct, reg)); 2852 } 2853 2854 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2855 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); 2856 len += sprintf(s + len, 2857 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2858 reg, i, (u64)octeon_read_csr64(oct, reg)); 2859 } 2860 2861 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2862 reg = CN23XX_VF_SLI_OQ_SIZE(i); 2863 len += sprintf(s + len, 2864 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2865 reg, i, (u64)octeon_read_csr64(oct, reg)); 2866 } 2867 2868 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2869 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); 2870 len += sprintf(s + len, 2871 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2872 reg, i, (u64)octeon_read_csr64(oct, reg)); 2873 } 2874 2875 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2876 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); 2877 len += sprintf(s + len, 2878 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2879 reg, i, (u64)octeon_read_csr64(oct, reg)); 2880 } 2881 2882 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2883 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); 2884 len += sprintf(s + len, 2885 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2886 reg, i, (u64)octeon_read_csr64(oct, reg)); 2887 } 2888 2889 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2890 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); 2891 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2892 reg, i, (u64)octeon_read_csr64(oct, reg)); 2893 } 2894 2895 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2896 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; 2897 len += sprintf(s + len, 2898 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2899 reg, i, (u64)octeon_read_csr64(oct, reg)); 2900 } 2901 2902 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2903 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; 2904 len += sprintf(s + len, 2905 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", 2906 reg, i, (u64)octeon_read_csr64(oct, reg)); 2907 } 2908 2909 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2910 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); 2911 len += sprintf(s + len, 2912 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2913 reg, i, (u64)octeon_read_csr64(oct, reg)); 2914 } 2915 2916 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2917 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); 2918 len += sprintf(s + len, 2919 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", 2920 reg, i, (u64)octeon_read_csr64(oct, reg)); 2921 } 2922 2923 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2924 reg = CN23XX_VF_SLI_IQ_DOORBELL(i); 2925 len += sprintf(s + len, 2926 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2927 reg, i, (u64)octeon_read_csr64(oct, reg)); 2928 } 2929 2930 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2931 reg = CN23XX_VF_SLI_IQ_SIZE(i); 2932 len += sprintf(s + len, 2933 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2934 reg, i, (u64)octeon_read_csr64(oct, reg)); 2935 } 2936 2937 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2938 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2939 len += sprintf(s + len, 2940 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2941 reg, i, (u64)octeon_read_csr64(oct, reg)); 2942 } 2943 2944 return len; 2945 } 2946 2947 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) 2948 { 2949 u32 reg; 2950 int i, len = 0; 2951 2952 /* PCI Window Registers */ 2953 2954 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2955 reg = CN6XXX_WIN_WR_ADDR_LO; 2956 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", 2957 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); 2958 reg = CN6XXX_WIN_WR_ADDR_HI; 2959 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", 2960 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); 2961 reg = CN6XXX_WIN_RD_ADDR_LO; 2962 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n", 2963 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); 2964 reg = CN6XXX_WIN_RD_ADDR_HI; 2965 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", 2966 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); 2967 reg = CN6XXX_WIN_WR_DATA_LO; 2968 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", 2969 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); 2970 reg = CN6XXX_WIN_WR_DATA_HI; 2971 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", 2972 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); 2973 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", 2974 CN6XXX_WIN_WR_MASK_REG, 2975 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); 2976 2977 /* PCI Interrupt Register */ 2978 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", 2979 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, 2980 CN6XXX_SLI_INT_ENB64_PORT0)); 2981 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", 2982 CN6XXX_SLI_INT_ENB64_PORT1, 2983 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); 2984 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, 2985 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); 2986 2987 /* PCI Output queue registers */ 2988 for (i = 0; i < oct->num_oqs; i++) { 2989 reg = CN6XXX_SLI_OQ_PKTS_SENT(i); 2990 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", 2991 reg, i, octeon_read_csr(oct, reg)); 2992 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); 2993 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", 2994 reg, i, octeon_read_csr(oct, reg)); 2995 } 2996 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; 2997 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", 2998 reg, octeon_read_csr(oct, reg)); 2999 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; 3000 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", 3001 reg, octeon_read_csr(oct, reg)); 3002 3003 /* PCI Input queue registers */ 3004 for (i = 0; i <= 3; i++) { 3005 u32 reg; 3006 3007 reg = CN6XXX_SLI_IQ_DOORBELL(i); 3008 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n", 3009 reg, i, octeon_read_csr(oct, reg)); 3010 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); 3011 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", 3012 reg, i, octeon_read_csr(oct, reg)); 3013 } 3014 3015 /* PCI DMA registers */ 3016 3017 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n", 3018 CN6XXX_DMA_CNT(0), 3019 octeon_read_csr(oct, CN6XXX_DMA_CNT(0))); 3020 reg = CN6XXX_DMA_PKT_INT_LEVEL(0); 3021 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n", 3022 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg)); 3023 reg = CN6XXX_DMA_TIME_INT_LEVEL(0); 3024 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n", 3025 CN6XXX_DMA_TIME_INT_LEVEL(0), 3026 octeon_read_csr(oct, reg)); 3027 3028 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n", 3029 CN6XXX_DMA_CNT(1), 3030 octeon_read_csr(oct, CN6XXX_DMA_CNT(1))); 3031 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 3032 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n", 3033 CN6XXX_DMA_PKT_INT_LEVEL(1), 3034 octeon_read_csr(oct, reg)); 3035 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 3036 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n", 3037 CN6XXX_DMA_TIME_INT_LEVEL(1), 3038 octeon_read_csr(oct, reg)); 3039 3040 /* PCI Index registers */ 3041 3042 len += sprintf(s + len, "\n"); 3043 3044 for (i = 0; i < 16; i++) { 3045 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port)); 3046 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n", 3047 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg); 3048 } 3049 3050 return len; 3051 } 3052 3053 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct) 3054 { 3055 u32 val; 3056 int i, len = 0; 3057 3058 /* PCI CONFIG Registers */ 3059 3060 len += sprintf(s + len, 3061 "\n\t Octeon Config space Registers\n\n"); 3062 3063 for (i = 0; i <= 13; i++) { 3064 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 3065 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 3066 (i * 4), i, val); 3067 } 3068 3069 for (i = 30; i <= 34; i++) { 3070 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 3071 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 3072 (i * 4), i, val); 3073 } 3074 3075 return len; 3076 } 3077 3078 /* Return register dump user app. */ 3079 static void lio_get_regs(struct net_device *dev, 3080 struct ethtool_regs *regs, void *regbuf) 3081 { 3082 struct lio *lio = GET_LIO(dev); 3083 int len = 0; 3084 struct octeon_device *oct = lio->oct_dev; 3085 3086 regs->version = OCT_ETHTOOL_REGSVER; 3087 3088 switch (oct->chip_id) { 3089 case OCTEON_CN23XX_PF_VID: 3090 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); 3091 len += cn23xx_read_csr_reg(regbuf + len, oct); 3092 break; 3093 case OCTEON_CN23XX_VF_VID: 3094 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); 3095 len += cn23xx_vf_read_csr_reg(regbuf + len, oct); 3096 break; 3097 case OCTEON_CN68XX: 3098 case OCTEON_CN66XX: 3099 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); 3100 len += cn6xxx_read_csr_reg(regbuf + len, oct); 3101 len += cn6xxx_read_config_reg(regbuf + len, oct); 3102 break; 3103 default: 3104 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", 3105 __func__, oct->chip_id); 3106 } 3107 } 3108 3109 static u32 lio_get_priv_flags(struct net_device *netdev) 3110 { 3111 struct lio *lio = GET_LIO(netdev); 3112 3113 return lio->oct_dev->priv_flags; 3114 } 3115 3116 static int lio_set_priv_flags(struct net_device *netdev, u32 flags) 3117 { 3118 struct lio *lio = GET_LIO(netdev); 3119 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); 3120 3121 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, 3122 intr_by_tx_bytes); 3123 return 0; 3124 } 3125 3126 static const struct ethtool_ops lio_ethtool_ops = { 3127 .get_link_ksettings = lio_get_link_ksettings, 3128 .set_link_ksettings = lio_set_link_ksettings, 3129 .get_link = ethtool_op_get_link, 3130 .get_drvinfo = lio_get_drvinfo, 3131 .get_ringparam = lio_ethtool_get_ringparam, 3132 .set_ringparam = lio_ethtool_set_ringparam, 3133 .get_channels = lio_ethtool_get_channels, 3134 .set_channels = lio_ethtool_set_channels, 3135 .set_phys_id = lio_set_phys_id, 3136 .get_eeprom_len = lio_get_eeprom_len, 3137 .get_eeprom = lio_get_eeprom, 3138 .get_strings = lio_get_strings, 3139 .get_ethtool_stats = lio_get_ethtool_stats, 3140 .get_pauseparam = lio_get_pauseparam, 3141 .set_pauseparam = lio_set_pauseparam, 3142 .get_regs_len = lio_get_regs_len, 3143 .get_regs = lio_get_regs, 3144 .get_msglevel = lio_get_msglevel, 3145 .set_msglevel = lio_set_msglevel, 3146 .get_sset_count = lio_get_sset_count, 3147 .get_coalesce = lio_get_intr_coalesce, 3148 .set_coalesce = lio_set_intr_coalesce, 3149 .get_priv_flags = lio_get_priv_flags, 3150 .set_priv_flags = lio_set_priv_flags, 3151 .get_ts_info = lio_get_ts_info, 3152 }; 3153 3154 static const struct ethtool_ops lio_vf_ethtool_ops = { 3155 .get_link_ksettings = lio_get_link_ksettings, 3156 .get_link = ethtool_op_get_link, 3157 .get_drvinfo = lio_get_vf_drvinfo, 3158 .get_ringparam = lio_ethtool_get_ringparam, 3159 .set_ringparam = lio_ethtool_set_ringparam, 3160 .get_channels = lio_ethtool_get_channels, 3161 .set_channels = lio_ethtool_set_channels, 3162 .get_strings = lio_vf_get_strings, 3163 .get_ethtool_stats = lio_vf_get_ethtool_stats, 3164 .get_regs_len = lio_get_regs_len, 3165 .get_regs = lio_get_regs, 3166 .get_msglevel = lio_get_msglevel, 3167 .set_msglevel = lio_vf_set_msglevel, 3168 .get_sset_count = lio_vf_get_sset_count, 3169 .get_coalesce = lio_get_intr_coalesce, 3170 .set_coalesce = lio_set_intr_coalesce, 3171 .get_priv_flags = lio_get_priv_flags, 3172 .set_priv_flags = lio_set_priv_flags, 3173 .get_ts_info = lio_get_ts_info, 3174 }; 3175 3176 void liquidio_set_ethtool_ops(struct net_device *netdev) 3177 { 3178 struct lio *lio = GET_LIO(netdev); 3179 struct octeon_device *oct = lio->oct_dev; 3180 3181 if (OCTEON_CN23XX_VF(oct)) 3182 netdev->ethtool_ops = &lio_vf_ethtool_ops; 3183 else 3184 netdev->ethtool_ops = &lio_ethtool_ops; 3185 } 3186