/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"


#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
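/* For illustration: with these macros, a table entry such as
 * {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)} expands to
 * { "rx_packets", NETDEV_STATS, sizeof(__u64),
 *   offsetof(struct rtnl_link_stats64, rx_packets) },
 * so ixgbe_get_ethtool_stats() below can copy any counter generically
 * from its type/size/offset triple instead of naming each field.
 */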
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so we
 * define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues. This is used
 * because we do not have a good way to get the max number of Rx queues
 * with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
	(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
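/* Sizing sketch (illustrative numbers, assuming struct ixgbe_queue_stats is
 * a packets/bytes pair of u64s): if the netdev exposes 64 Tx queues, then
 * IXGBE_NUM_RX_QUEUES is also 64 by the definition above and
 * IXGBE_QUEUE_STATS_LEN is (64 + 64) * 2 = 256 entries.  The pxon/pxoff
 * counters are arrays of eight u64s in each of four directions, so
 * IXGBE_PB_STATS_LEN contributes another 8 * 4 = 32 entries.
 */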
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_ENABLE;
	ecmd->transceiver = XCVR_EXTERNAL;
	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		ecmd->supported |= (SUPPORTED_1000baseT_Full |
				    SUPPORTED_Autoneg);

		switch (hw->mac.type) {
		case ixgbe_mac_X540:
			ecmd->supported |= SUPPORTED_100baseT_Full;
			break;
		default:
			break;
		}

		ecmd->advertising = ADVERTISED_Autoneg;
		if (hw->phy.autoneg_advertised) {
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_100_FULL)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_10GB_FULL)
				ecmd->advertising |= ADVERTISED_10000baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_1GB_FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			/*
			 * Default advertised modes in case
			 * phy.autoneg_advertised isn't set.
			 */
			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
					      ADVERTISED_1000baseT_Full);
			if (hw->mac.type == ixgbe_mac_X540)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
		}

		if (hw->phy.media_type == ixgbe_media_type_copper) {
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
		} else {
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
		}
	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/* Set as FIBRE until SERDES defined in kernel */
		if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
			ecmd->supported = (SUPPORTED_1000baseT_Full |
					   SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
			ecmd->autoneg = AUTONEG_DISABLE;
		} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
			   (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_Autoneg |
					    SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_Autoneg |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		} else {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		}
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising = (ADVERTISED_10000baseT_Full |
				     ADVERTISED_FIBRE);
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
	}

	/* Get PHY type */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		/* Copper 10GBASE-T */
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		switch (adapter->hw.phy.sfp_type) {
		/* SFP+ devices, further checking needed */
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->port = PORT_TP;
			ecmd->supported = SUPPORTED_TP;
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_TP);
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->port = PORT_OTHER;
		break;
	}

	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up) {
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true, true);
		}
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
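/* Usage note (illustrative): on copper and multispeed-fiber parts this is
 * what a command such as "ethtool -s ethX autoneg on advertise N" lands in;
 * it can only narrow the advertised speed set, and forcing a fixed speed
 * with autoneg off is rejected above with -EINVAL.
 */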
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->fc.disable_fc_autoneg)
		pause->autoneg = 0;
	else
		pause->autoneg = 1;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
#ifdef CONFIG_DCB
	} else if (hw->fc.current_mode == ixgbe_fc_pfc) {
		pause->rx_pause = 0;
		pause->tx_pause = 0;
#endif
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc;

#ifdef CONFIG_DCB
	if (adapter->dcb_cfg.pfc_mode_enable ||
	    ((hw->mac.type == ixgbe_mac_82598EB) &&
	     (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
		return -EINVAL;

#endif
	fc = hw->fc;

	if (pause->autoneg != AUTONEG_ENABLE)
		fc.disable_fc_autoneg = true;
	else
		fc.disable_fc_autoneg = false;

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else if (!pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_none;
	else
		return -EINVAL;

#ifdef CONFIG_DCB
	adapter->last_lfc_mode = fc.requested_mode;
#endif

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
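/* Usage note (illustrative): "ethtool -A ethX rx on tx on autoneg off" ends
 * up here.  Note that when pause autoneg is left enabled, the requested mode
 * is forced to ixgbe_fc_full regardless of the rx/tx flags, per the first
 * branch of the mode selection above.
 */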
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 1129
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599/X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
}

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
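/* Worked example for the word math above (illustrative values): a request
 * with eeprom->offset = 3 and eeprom->len = 4 covers bytes 3..6, i.e. 16-bit
 * words 1..3, so first_word = 1, last_word = 3 and eeprom_len = 3; the final
 * memcpy then skips the leading byte of word 1 because (offset & 1) is set.
 * ixgbe_set_eeprom() below relies on the same rounding when it does its
 * read-modify-write of partially covered words.
 */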
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}

static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
		       adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->testinfo_len = IXGBE_TEST_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}

static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;
	bool need_update = false;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring[0]->count) &&
	    (new_rx_count == adapter->rx_ring[0]->count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
	if (!temp_tx_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));
			temp_tx_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_tx_ring[i]);
				}
				goto clear_reset;
			}
		}
		need_update = true;
	}

	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
	if (!temp_rx_ring) {
		err = -ENOMEM;
		goto err_setup;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));
			temp_rx_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_rx_ring[i]);
				}
				goto err_setup;
			}
		}
		need_update = true;
	}

	/* if rings need to be updated, here's the place to do it in one shot */
	if (need_update) {
		ixgbe_down(adapter);

		/* tx */
		if (new_tx_count != adapter->tx_ring_count) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				ixgbe_free_tx_resources(adapter->tx_ring[i]);
				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
				       sizeof(struct ixgbe_ring));
			}
			adapter->tx_ring_count = new_tx_count;
		}

		/* rx */
		if (new_rx_count != adapter->rx_ring_count) {
			for (i = 0; i < adapter->num_rx_queues; i++) {
				ixgbe_free_rx_resources(adapter->rx_ring[i]);
				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
				       sizeof(struct ixgbe_ring));
			}
			adapter->rx_ring_count = new_rx_count;
		}
		ixgbe_up(adapter);
	}

	vfree(temp_rx_ring);
err_setup:
	vfree(temp_tx_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
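/* Usage note (illustrative): "ethtool -G ethX rx 4096 tx 4096" ends up here.
 * Requested counts are clamped to [IXGBE_MIN_*, IXGBE_MAX_*] and rounded to
 * the descriptor multiple, and the new rings are fully allocated before the
 * old ones are torn down, so a failed resize leaves the running interface
 * untouched.
 */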
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
			    ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
			    ixgbe_gstrings_stats[i].stat_offset;
			break;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* IXGBE_NUM_RX_QUEUES == netdev->num_tx_queues, so use the clearer
	 * name for each loop */
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
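/* For example (illustrative): the { IXGBE_RDBAL(0), 4, PATTERN_TEST, ... }
 * entries below exercise RDBAL(0)..RDBAL(3), because ixgbe_reg_test() adds
 * i * 0x40 per array element to match the 0x40 register spacing described
 * above, while TABLE32_TEST entries step by 4 bytes and TABLE64_TEST_* by 8.
 */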
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = readl(adapter->hw.hw_addr + reg);
		writel((test_pattern[pat] & write),
		       (adapter->hw.hw_addr + reg));
		val = readl(adapter->hw.hw_addr + reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got "
			      "0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			writel(before, adapter->hw.hw_addr + reg);
			return true;
		}
		writel(before, adapter->hw.hw_addr + reg);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;
	before = readl(adapter->hw.hw_addr + reg);
	writel((write & mask), (adapter->hw.hw_addr + reg));
	val = readl(adapter->hw.hw_addr + reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X "
		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
		*data = reg;
		writel(before, (adapter->hw.hw_addr + reg));
		return true;
	}
	writel(before, (adapter->hw.hw_addr + reg));
	return false;
}

#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)
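/* These wrappers exist so that a failure inside the register walk in
 * ixgbe_reg_test() below can return out of that function immediately;
 * the helpers leave the offset of the failing register in *data, which
 * ethtool then reports as the test result.
 */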
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X "
		      "expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}

static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}

static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}

static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* X540 needs to set the MACC.FLU bit to force link up */
	if (adapter->hw.mac.type == ixgbe_mac_X540) {
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
	}

	/* right now we only support MAC loopback in the driver */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* Setup MAC loopback */
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}

static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
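/* Sketch of the resulting test frame, with half = frame_size / 2 (layout is
 * read off the code above, not hardware documentation): the whole frame is
 * first filled with 0xFF, bytes [half, half + half/2 - 1) are then
 * overwritten with 0xAA, and single marker bytes 0xBE and 0xAF land at
 * half + 10 and half + 12.  ixgbe_check_lbtest_frame() below only
 * spot-checks data[3] and the two markers rather than the full payload.
 */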
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* X540 needs to set the MACC.FLU bit to force link up */
	if (adapter->hw.mac.type == ixgbe_mac_X540) {
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
	}

	/* right now we only support MAC loopback in the driver */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* Setup MAC loopback */
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}

static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}

static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
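/*
 * Reap completed descriptors from the test rings: for every Rx
 * descriptor with the DD bit set, verify the received frame contents,
 * free the matching Tx buffer, and advance both next-to-clean indexes.
 * Returns the number of frames that passed verification.
 */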
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer;
	struct ixgbe_tx_buffer *tx_buffer;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);

		/* increment Rx/Tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}

static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue */
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
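/*
 * ethtool self-test entry point (invoked by "ethtool -t").  Results are
 * reported in data[]: [0] register test, [1] eeprom test, [2] interrupt
 * test, [3] loopback test, [4] link test.  Offline mode closes a running
 * interface and resets the hardware between tests; online mode only
 * performs the link test.
 */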
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg
		 * doesn't interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev,
						    "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");
		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
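/*
 * Return 0 if this device/subdevice combination supports Wake-on-LAN,
 * 1 otherwise.  Clears wol->supported for the excluded parts so that
 * ixgbe_get_wol() reports nothing on them.
 */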
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 1;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

	/* WOL not supported except for the following */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices support WOL */
		switch (hw->subsystem_device_id) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
			if (hw->bus.func != 0) {
				wol->supported = 0;
				break;
			}
			/* fall through */
		case IXGBE_SUBDEV_ID_82599_SFP:
			retval = 0;
			break;
		default:
			wol->supported = 0;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (hw->subsystem_device_id ==
		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
			wol->supported = 0;
			break;
		}
		retval = 0;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		retval = 0;
		break;
	case IXGBE_DEV_ID_X540T:
		/* check eeprom to see if WOL is enabled */
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0))) {
			retval = 0;
			break;
		}

		/* All others not supported */
		wol->supported = 0;
		break;
	default:
		wol->supported = 0;
	}

	return retval;
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
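/*
 * Identify-adapter LED blink (invoked by "ethtool -p").  Returning 2
 * from ETHTOOL_ID_ACTIVE asks the ethtool core to drive the LED
 * synchronously at two on/off cycles per second; the saved LEDCTL value
 * is restored when the blinking stops.
 */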
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}
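/*
 * Set interrupt moderation (invoked by "ethtool -C").  Values of 0 and 1
 * are stored as-is (1 selects the driver's dynamic defaults, 20K ITR for
 * Rx and 10K for Tx); larger microsecond values are stored left-shifted
 * by 2, and ixgbe_get_coalesce() shifts them back when reporting.
 */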
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	int num_vectors;
	u16 tx_itr_param, rx_itr_param;
	bool need_reset = false;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
	    && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* check the old value and enable RSC if necessary */
	need_reset = ixgbe_update_rsc(adapter);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_vectors = 1;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
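/*
 * Look up one Flow Director filter by its ethtool location and translate
 * it back into an ethtool_rx_flow_spec.  cmd->data is set to the total
 * filter capacity: (1024 << pballoc) entries minus the two reserved ones.
 */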
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* if RSS is disabled then report no hashing */
	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
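/*
 * Insert, replace or (when input is NULL) delete a software Flow Director
 * filter entry.  The list is kept sorted by sw_idx; a rule already
 * occupying the slot is erased from hardware unless the replacement
 * hashes to the same bucket.
 */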
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node, *node2, *parent;
	struct ixgbe_fdir_filter *rule;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_after(parent, &input->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}

static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
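/*
 * Program a new Flow Director perfect filter from ethtool.  Only one
 * field mask is supported per port: the first rule programs the mask
 * into hardware, and every later rule must use an identical mask.
 */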
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue index greater
	 * than or equal to the number of online Rx queues.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= adapter->num_rx_queues))
		return -EINVAL;

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes = fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx,
				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[input->action]->reg_idx);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}

static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
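/*
 * Adjust which packet fields feed the RSS hash (invoked via ethtool's
 * rx-flow-hash interface, ETHTOOL_SRXFH).  IP src/dst hashing is fixed;
 * only L4 port hashing for UDP is configurable, and enabling it can
 * deliver fragmented UDP flows out of order to the stack.
 */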
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}

static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings		= ixgbe_get_settings,
	.set_settings		= ixgbe_set_settings,
	.get_drvinfo		= ixgbe_get_drvinfo,
	.get_regs_len		= ixgbe_get_regs_len,
	.get_regs		= ixgbe_get_regs,
	.get_wol		= ixgbe_get_wol,
	.set_wol		= ixgbe_set_wol,
	.nway_reset		= ixgbe_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= ixgbe_get_eeprom_len,
	.get_eeprom		= ixgbe_get_eeprom,
	.set_eeprom		= ixgbe_set_eeprom,
	.get_ringparam		= ixgbe_get_ringparam,
	.set_ringparam		= ixgbe_set_ringparam,
	.get_pauseparam		= ixgbe_get_pauseparam,
	.set_pauseparam		= ixgbe_set_pauseparam,
	.get_msglevel		= ixgbe_get_msglevel,
	.set_msglevel		= ixgbe_set_msglevel,
	.self_test		= ixgbe_diag_test,
	.get_strings		= ixgbe_get_strings,
	.set_phys_id		= ixgbe_set_phys_id,
	.get_sset_count		= ixgbe_get_sset_count,
	.get_ethtool_stats	= ixgbe_get_ethtool_stats,
	.get_coalesce		= ixgbe_get_coalesce,
	.set_coalesce		= ixgbe_set_coalesce,
	.get_rxnfc		= ixgbe_get_rxnfc,
	.set_rxnfc		= ixgbe_set_rxnfc,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}