1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* ethtool support for ixgbe */ 29 30 #include <linux/interrupt.h> 31 #include <linux/types.h> 32 #include <linux/module.h> 33 #include <linux/slab.h> 34 #include <linux/pci.h> 35 #include <linux/netdevice.h> 36 #include <linux/ethtool.h> 37 #include <linux/vmalloc.h> 38 #include <linux/uaccess.h> 39 40 #include "ixgbe.h" 41 42 43 #define IXGBE_ALL_RAR_ENTRIES 16 44 45 enum {NETDEV_STATS, IXGBE_STATS}; 46 47 struct ixgbe_stats { 48 char stat_string[ETH_GSTRING_LEN]; 49 int type; 50 int sizeof_stat; 51 int stat_offset; 52 }; 53 54 #define IXGBE_STAT(m) IXGBE_STATS, \ 55 sizeof(((struct ixgbe_adapter *)0)->m), \ 56 offsetof(struct ixgbe_adapter, m) 57 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ 58 sizeof(((struct rtnl_link_stats64 *)0)->m), \ 59 offsetof(struct rtnl_link_stats64, m) 60 61 static const struct ixgbe_stats ixgbe_gstrings_stats[] = { 62 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, 63 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, 64 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, 65 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, 66 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 67 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 68 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 69 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, 70 {"lsc_int", IXGBE_STAT(lsc_int)}, 71 {"tx_busy", IXGBE_STAT(tx_busy)}, 72 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 73 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, 74 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, 75 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, 76 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, 77 {"multicast", IXGBE_NETDEV_STAT(multicast)}, 78 {"broadcast", IXGBE_STAT(stats.bprc)}, 79 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 80 {"collisions", IXGBE_NETDEV_STAT(collisions)}, 81 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, 82 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, 83 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, 84 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, 85 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 86 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 87 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 88 {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, 89 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, 90 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, 91 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, 92 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, 93 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, 94 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, 95 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 96 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 97 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 98 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, 99 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, 100 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, 101 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, 102 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, 103 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, 104 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 105 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 106 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 107 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, 108 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, 109 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, 110 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, 111 #ifdef IXGBE_FCOE 112 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 113 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 114 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, 115 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, 116 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, 117 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, 118 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, 119 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, 120 #endif /* IXGBE_FCOE */ 121 }; 122 123 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so 124 * we set the num_rx_queues to evaluate to num_tx_queues. This is 125 * used because we do not have a good way to get the max number of 126 * rx queues with CONFIG_RPS disabled. 127 */ 128 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues 129 130 #define IXGBE_QUEUE_STATS_LEN ( \ 131 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ 132 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 133 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 134 #define IXGBE_PB_STATS_LEN ( \ 135 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ 136 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ 137 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ 138 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 139 / sizeof(u64)) 140 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 141 IXGBE_PB_STATS_LEN + \ 142 IXGBE_QUEUE_STATS_LEN) 143 144 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 145 "Register test (offline)", "Eeprom test (offline)", 146 "Interrupt test (offline)", "Loopback test (offline)", 147 "Link test (on/offline)" 148 }; 149 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 150 151 static int ixgbe_get_settings(struct net_device *netdev, 152 struct ethtool_cmd *ecmd) 153 { 154 struct ixgbe_adapter *adapter = netdev_priv(netdev); 155 struct ixgbe_hw *hw = &adapter->hw; 156 u32 link_speed = 0; 157 bool link_up; 158 159 ecmd->supported = SUPPORTED_10000baseT_Full; 160 ecmd->autoneg = AUTONEG_ENABLE; 161 ecmd->transceiver = XCVR_EXTERNAL; 162 if ((hw->phy.media_type == ixgbe_media_type_copper) || 163 (hw->phy.multispeed_fiber)) { 164 ecmd->supported |= (SUPPORTED_1000baseT_Full | 165 SUPPORTED_Autoneg); 166 167 switch (hw->mac.type) { 168 case ixgbe_mac_X540: 169 ecmd->supported |= SUPPORTED_100baseT_Full; 170 break; 171 default: 172 break; 173 } 174 175 ecmd->advertising = ADVERTISED_Autoneg; 176 if (hw->phy.autoneg_advertised) { 177 if (hw->phy.autoneg_advertised & 178 IXGBE_LINK_SPEED_100_FULL) 179 ecmd->advertising |= ADVERTISED_100baseT_Full; 180 if (hw->phy.autoneg_advertised & 181 IXGBE_LINK_SPEED_10GB_FULL) 182 ecmd->advertising |= ADVERTISED_10000baseT_Full; 183 if (hw->phy.autoneg_advertised & 184 IXGBE_LINK_SPEED_1GB_FULL) 185 ecmd->advertising |= ADVERTISED_1000baseT_Full; 186 } else { 187 /* 188 * Default advertised modes in case 189 * phy.autoneg_advertised isn't set. 190 */ 191 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 192 ADVERTISED_1000baseT_Full); 193 if (hw->mac.type == ixgbe_mac_X540) 194 ecmd->advertising |= ADVERTISED_100baseT_Full; 195 } 196 197 if (hw->phy.media_type == ixgbe_media_type_copper) { 198 ecmd->supported |= SUPPORTED_TP; 199 ecmd->advertising |= ADVERTISED_TP; 200 ecmd->port = PORT_TP; 201 } else { 202 ecmd->supported |= SUPPORTED_FIBRE; 203 ecmd->advertising |= ADVERTISED_FIBRE; 204 ecmd->port = PORT_FIBRE; 205 } 206 } else if (hw->phy.media_type == ixgbe_media_type_backplane) { 207 /* Set as FIBRE until SERDES defined in kernel */ 208 if (hw->device_id == IXGBE_DEV_ID_82598_BX) { 209 ecmd->supported = (SUPPORTED_1000baseT_Full | 210 SUPPORTED_FIBRE); 211 ecmd->advertising = (ADVERTISED_1000baseT_Full | 212 ADVERTISED_FIBRE); 213 ecmd->port = PORT_FIBRE; 214 ecmd->autoneg = AUTONEG_DISABLE; 215 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || 216 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { 217 ecmd->supported |= (SUPPORTED_1000baseT_Full | 218 SUPPORTED_Autoneg | 219 SUPPORTED_FIBRE); 220 ecmd->advertising = (ADVERTISED_10000baseT_Full | 221 ADVERTISED_1000baseT_Full | 222 ADVERTISED_Autoneg | 223 ADVERTISED_FIBRE); 224 ecmd->port = PORT_FIBRE; 225 } else { 226 ecmd->supported |= (SUPPORTED_1000baseT_Full | 227 SUPPORTED_FIBRE); 228 ecmd->advertising = (ADVERTISED_10000baseT_Full | 229 ADVERTISED_1000baseT_Full | 230 ADVERTISED_FIBRE); 231 ecmd->port = PORT_FIBRE; 232 } 233 } else { 234 ecmd->supported |= SUPPORTED_FIBRE; 235 ecmd->advertising = (ADVERTISED_10000baseT_Full | 236 ADVERTISED_FIBRE); 237 ecmd->port = PORT_FIBRE; 238 ecmd->autoneg = AUTONEG_DISABLE; 239 } 240 241 /* Get PHY type */ 242 switch (adapter->hw.phy.type) { 243 case ixgbe_phy_tn: 244 case ixgbe_phy_aq: 245 case ixgbe_phy_cu_unknown: 246 /* Copper 10G-BASET */ 247 ecmd->port = PORT_TP; 248 break; 249 case ixgbe_phy_qt: 250 ecmd->port = PORT_FIBRE; 251 break; 252 case ixgbe_phy_nl: 253 case ixgbe_phy_sfp_passive_tyco: 254 case ixgbe_phy_sfp_passive_unknown: 255 case ixgbe_phy_sfp_ftl: 256 case ixgbe_phy_sfp_avago: 257 case ixgbe_phy_sfp_intel: 258 case ixgbe_phy_sfp_unknown: 259 switch (adapter->hw.phy.sfp_type) { 260 /* SFP+ devices, further checking needed */ 261 case ixgbe_sfp_type_da_cu: 262 case ixgbe_sfp_type_da_cu_core0: 263 case ixgbe_sfp_type_da_cu_core1: 264 ecmd->port = PORT_DA; 265 break; 266 case ixgbe_sfp_type_sr: 267 case ixgbe_sfp_type_lr: 268 case ixgbe_sfp_type_srlr_core0: 269 case ixgbe_sfp_type_srlr_core1: 270 ecmd->port = PORT_FIBRE; 271 break; 272 case ixgbe_sfp_type_not_present: 273 ecmd->port = PORT_NONE; 274 break; 275 case ixgbe_sfp_type_1g_cu_core0: 276 case ixgbe_sfp_type_1g_cu_core1: 277 ecmd->port = PORT_TP; 278 ecmd->supported = SUPPORTED_TP; 279 ecmd->advertising = (ADVERTISED_1000baseT_Full | 280 ADVERTISED_TP); 281 break; 282 case ixgbe_sfp_type_unknown: 283 default: 284 ecmd->port = PORT_OTHER; 285 break; 286 } 287 break; 288 case ixgbe_phy_xaui: 289 ecmd->port = PORT_NONE; 290 break; 291 case ixgbe_phy_unknown: 292 case ixgbe_phy_generic: 293 case ixgbe_phy_sfp_unsupported: 294 default: 295 ecmd->port = PORT_OTHER; 296 break; 297 } 298 299 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 300 if (link_up) { 301 switch (link_speed) { 302 case IXGBE_LINK_SPEED_10GB_FULL: 303 ethtool_cmd_speed_set(ecmd, SPEED_10000); 304 break; 305 case IXGBE_LINK_SPEED_1GB_FULL: 306 ethtool_cmd_speed_set(ecmd, SPEED_1000); 307 break; 308 case IXGBE_LINK_SPEED_100_FULL: 309 ethtool_cmd_speed_set(ecmd, SPEED_100); 310 break; 311 default: 312 break; 313 } 314 ecmd->duplex = DUPLEX_FULL; 315 } else { 316 ethtool_cmd_speed_set(ecmd, -1); 317 ecmd->duplex = -1; 318 } 319 320 return 0; 321 } 322 323 static int ixgbe_set_settings(struct net_device *netdev, 324 struct ethtool_cmd *ecmd) 325 { 326 struct ixgbe_adapter *adapter = netdev_priv(netdev); 327 struct ixgbe_hw *hw = &adapter->hw; 328 u32 advertised, old; 329 s32 err = 0; 330 331 if ((hw->phy.media_type == ixgbe_media_type_copper) || 332 (hw->phy.multispeed_fiber)) { 333 /* 334 * this function does not support duplex forcing, but can 335 * limit the advertising of the adapter to the specified speed 336 */ 337 if (ecmd->autoneg == AUTONEG_DISABLE) 338 return -EINVAL; 339 340 if (ecmd->advertising & ~ecmd->supported) 341 return -EINVAL; 342 343 old = hw->phy.autoneg_advertised; 344 advertised = 0; 345 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 346 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 347 348 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 349 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 350 351 if (ecmd->advertising & ADVERTISED_100baseT_Full) 352 advertised |= IXGBE_LINK_SPEED_100_FULL; 353 354 if (old == advertised) 355 return err; 356 /* this sets the link speed and restarts auto-neg */ 357 hw->mac.autotry_restart = true; 358 err = hw->mac.ops.setup_link(hw, advertised, true, true); 359 if (err) { 360 e_info(probe, "setup link failed with code %d\n", err); 361 hw->mac.ops.setup_link(hw, old, true, true); 362 } 363 } else { 364 /* in this case we currently only support 10Gb/FULL */ 365 u32 speed = ethtool_cmd_speed(ecmd); 366 if ((ecmd->autoneg == AUTONEG_ENABLE) || 367 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 368 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 369 return -EINVAL; 370 } 371 372 return err; 373 } 374 375 static void ixgbe_get_pauseparam(struct net_device *netdev, 376 struct ethtool_pauseparam *pause) 377 { 378 struct ixgbe_adapter *adapter = netdev_priv(netdev); 379 struct ixgbe_hw *hw = &adapter->hw; 380 381 if (hw->fc.disable_fc_autoneg) 382 pause->autoneg = 0; 383 else 384 pause->autoneg = 1; 385 386 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 387 pause->rx_pause = 1; 388 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 389 pause->tx_pause = 1; 390 } else if (hw->fc.current_mode == ixgbe_fc_full) { 391 pause->rx_pause = 1; 392 pause->tx_pause = 1; 393 #ifdef CONFIG_DCB 394 } else if (hw->fc.current_mode == ixgbe_fc_pfc) { 395 pause->rx_pause = 0; 396 pause->tx_pause = 0; 397 #endif 398 } 399 } 400 401 static int ixgbe_set_pauseparam(struct net_device *netdev, 402 struct ethtool_pauseparam *pause) 403 { 404 struct ixgbe_adapter *adapter = netdev_priv(netdev); 405 struct ixgbe_hw *hw = &adapter->hw; 406 struct ixgbe_fc_info fc; 407 408 #ifdef CONFIG_DCB 409 if (adapter->dcb_cfg.pfc_mode_enable || 410 ((hw->mac.type == ixgbe_mac_82598EB) && 411 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) 412 return -EINVAL; 413 414 #endif 415 fc = hw->fc; 416 417 if (pause->autoneg != AUTONEG_ENABLE) 418 fc.disable_fc_autoneg = true; 419 else 420 fc.disable_fc_autoneg = false; 421 422 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 423 fc.requested_mode = ixgbe_fc_full; 424 else if (pause->rx_pause && !pause->tx_pause) 425 fc.requested_mode = ixgbe_fc_rx_pause; 426 else if (!pause->rx_pause && pause->tx_pause) 427 fc.requested_mode = ixgbe_fc_tx_pause; 428 else if (!pause->rx_pause && !pause->tx_pause) 429 fc.requested_mode = ixgbe_fc_none; 430 else 431 return -EINVAL; 432 433 #ifdef CONFIG_DCB 434 adapter->last_lfc_mode = fc.requested_mode; 435 #endif 436 437 /* if the thing changed then we'll update and use new autoneg */ 438 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { 439 hw->fc = fc; 440 if (netif_running(netdev)) 441 ixgbe_reinit_locked(adapter); 442 else 443 ixgbe_reset(adapter); 444 } 445 446 return 0; 447 } 448 449 static u32 ixgbe_get_msglevel(struct net_device *netdev) 450 { 451 struct ixgbe_adapter *adapter = netdev_priv(netdev); 452 return adapter->msg_enable; 453 } 454 455 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) 456 { 457 struct ixgbe_adapter *adapter = netdev_priv(netdev); 458 adapter->msg_enable = data; 459 } 460 461 static int ixgbe_get_regs_len(struct net_device *netdev) 462 { 463 #define IXGBE_REGS_LEN 1129 464 return IXGBE_REGS_LEN * sizeof(u32); 465 } 466 467 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 468 469 static void ixgbe_get_regs(struct net_device *netdev, 470 struct ethtool_regs *regs, void *p) 471 { 472 struct ixgbe_adapter *adapter = netdev_priv(netdev); 473 struct ixgbe_hw *hw = &adapter->hw; 474 u32 *regs_buff = p; 475 u8 i; 476 477 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); 478 479 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; 480 481 /* General Registers */ 482 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); 483 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); 484 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 485 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); 486 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); 487 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 488 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); 489 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); 490 491 /* NVM Register */ 492 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); 493 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); 494 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); 495 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); 496 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); 497 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); 498 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); 499 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); 500 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); 501 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 502 503 /* Interrupt */ 504 /* don't read EICR because it can clear interrupt causes, instead 505 * read EICS which is a shadow but doesn't clear EICR */ 506 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); 507 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 508 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 509 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 510 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); 511 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); 512 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); 513 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 514 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 515 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 516 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); 517 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 518 519 /* Flow Control */ 520 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); 521 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); 522 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 523 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 524 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 525 for (i = 0; i < 8; i++) { 526 switch (hw->mac.type) { 527 case ixgbe_mac_82598EB: 528 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 529 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 530 break; 531 case ixgbe_mac_82599EB: 532 case ixgbe_mac_X540: 533 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); 534 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 535 break; 536 default: 537 break; 538 } 539 } 540 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 541 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 542 543 /* Receive DMA */ 544 for (i = 0; i < 64; i++) 545 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); 546 for (i = 0; i < 64; i++) 547 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); 548 for (i = 0; i < 64; i++) 549 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); 550 for (i = 0; i < 64; i++) 551 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); 552 for (i = 0; i < 64; i++) 553 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); 554 for (i = 0; i < 64; i++) 555 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 556 for (i = 0; i < 16; i++) 557 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); 558 for (i = 0; i < 16; i++) 559 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 560 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 561 for (i = 0; i < 8; i++) 562 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 563 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 564 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); 565 566 /* Receive */ 567 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 568 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); 569 for (i = 0; i < 16; i++) 570 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 571 for (i = 0; i < 16; i++) 572 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 573 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); 574 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 575 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 576 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 577 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); 578 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 579 for (i = 0; i < 8; i++) 580 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); 581 for (i = 0; i < 8; i++) 582 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); 583 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); 584 585 /* Transmit */ 586 for (i = 0; i < 32; i++) 587 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); 588 for (i = 0; i < 32; i++) 589 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); 590 for (i = 0; i < 32; i++) 591 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); 592 for (i = 0; i < 32; i++) 593 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); 594 for (i = 0; i < 32; i++) 595 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); 596 for (i = 0; i < 32; i++) 597 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 598 for (i = 0; i < 32; i++) 599 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); 600 for (i = 0; i < 32; i++) 601 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); 602 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); 603 for (i = 0; i < 16; i++) 604 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 605 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); 606 for (i = 0; i < 8; i++) 607 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); 608 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); 609 610 /* Wake Up */ 611 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); 612 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); 613 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); 614 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); 615 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); 616 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); 617 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); 618 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 619 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 620 621 /* DCB */ 622 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 623 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 624 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 625 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); 626 for (i = 0; i < 8; i++) 627 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); 628 for (i = 0; i < 8; i++) 629 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); 630 for (i = 0; i < 8; i++) 631 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); 632 for (i = 0; i < 8; i++) 633 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); 634 for (i = 0; i < 8; i++) 635 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); 636 for (i = 0; i < 8; i++) 637 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); 638 639 /* Statistics */ 640 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); 641 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); 642 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); 643 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); 644 for (i = 0; i < 8; i++) 645 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); 646 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); 647 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); 648 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); 649 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); 650 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); 651 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); 652 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); 653 for (i = 0; i < 8; i++) 654 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); 655 for (i = 0; i < 8; i++) 656 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); 657 for (i = 0; i < 8; i++) 658 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); 659 for (i = 0; i < 8; i++) 660 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); 661 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); 662 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); 663 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); 664 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); 665 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); 666 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); 667 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); 668 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); 669 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); 670 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); 671 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); 672 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); 673 for (i = 0; i < 8; i++) 674 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); 675 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); 676 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); 677 regs_buff[956] = IXGBE_GET_STAT(adapter, roc); 678 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); 679 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); 680 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); 681 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); 682 regs_buff[961] = IXGBE_GET_STAT(adapter, tor); 683 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); 684 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); 685 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); 686 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); 687 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); 688 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); 689 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); 690 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); 691 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); 692 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); 693 regs_buff[973] = IXGBE_GET_STAT(adapter, xec); 694 for (i = 0; i < 16; i++) 695 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); 696 for (i = 0; i < 16; i++) 697 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); 698 for (i = 0; i < 16; i++) 699 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); 700 for (i = 0; i < 16; i++) 701 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); 702 703 /* MAC */ 704 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); 705 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 706 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 707 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); 708 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); 709 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 710 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 711 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); 712 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); 713 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); 714 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); 715 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); 716 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); 717 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); 718 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); 719 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); 720 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); 721 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); 722 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); 723 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); 724 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); 725 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); 726 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); 727 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); 728 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); 729 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); 730 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); 731 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); 732 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 733 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); 734 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); 735 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); 736 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 737 738 /* Diagnostic */ 739 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 740 for (i = 0; i < 8; i++) 741 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 742 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 743 for (i = 0; i < 4; i++) 744 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); 745 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 746 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 747 for (i = 0; i < 8; i++) 748 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 749 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 750 for (i = 0; i < 4; i++) 751 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); 752 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 753 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 754 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 755 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); 756 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); 757 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); 758 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); 759 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); 760 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); 761 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 762 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 763 for (i = 0; i < 8; i++) 764 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 765 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 766 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 767 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 768 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); 769 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); 770 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); 771 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); 772 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); 773 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); 774 775 /* 82599 X540 specific registers */ 776 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); 777 } 778 779 static int ixgbe_get_eeprom_len(struct net_device *netdev) 780 { 781 struct ixgbe_adapter *adapter = netdev_priv(netdev); 782 return adapter->hw.eeprom.word_size * 2; 783 } 784 785 static int ixgbe_get_eeprom(struct net_device *netdev, 786 struct ethtool_eeprom *eeprom, u8 *bytes) 787 { 788 struct ixgbe_adapter *adapter = netdev_priv(netdev); 789 struct ixgbe_hw *hw = &adapter->hw; 790 u16 *eeprom_buff; 791 int first_word, last_word, eeprom_len; 792 int ret_val = 0; 793 u16 i; 794 795 if (eeprom->len == 0) 796 return -EINVAL; 797 798 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 799 800 first_word = eeprom->offset >> 1; 801 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 802 eeprom_len = last_word - first_word + 1; 803 804 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); 805 if (!eeprom_buff) 806 return -ENOMEM; 807 808 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, 809 eeprom_buff); 810 811 /* Device's eeprom is always little-endian, word addressable */ 812 for (i = 0; i < eeprom_len; i++) 813 le16_to_cpus(&eeprom_buff[i]); 814 815 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); 816 kfree(eeprom_buff); 817 818 return ret_val; 819 } 820 821 static int ixgbe_set_eeprom(struct net_device *netdev, 822 struct ethtool_eeprom *eeprom, u8 *bytes) 823 { 824 struct ixgbe_adapter *adapter = netdev_priv(netdev); 825 struct ixgbe_hw *hw = &adapter->hw; 826 u16 *eeprom_buff; 827 void *ptr; 828 int max_len, first_word, last_word, ret_val = 0; 829 u16 i; 830 831 if (eeprom->len == 0) 832 return -EINVAL; 833 834 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 835 return -EINVAL; 836 837 max_len = hw->eeprom.word_size * 2; 838 839 first_word = eeprom->offset >> 1; 840 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 841 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 842 if (!eeprom_buff) 843 return -ENOMEM; 844 845 ptr = eeprom_buff; 846 847 if (eeprom->offset & 1) { 848 /* 849 * need read/modify/write of first changed EEPROM word 850 * only the second byte of the word is being modified 851 */ 852 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); 853 if (ret_val) 854 goto err; 855 856 ptr++; 857 } 858 if ((eeprom->offset + eeprom->len) & 1) { 859 /* 860 * need read/modify/write of last changed EEPROM word 861 * only the first byte of the word is being modified 862 */ 863 ret_val = hw->eeprom.ops.read(hw, last_word, 864 &eeprom_buff[last_word - first_word]); 865 if (ret_val) 866 goto err; 867 } 868 869 /* Device's eeprom is always little-endian, word addressable */ 870 for (i = 0; i < last_word - first_word + 1; i++) 871 le16_to_cpus(&eeprom_buff[i]); 872 873 memcpy(ptr, bytes, eeprom->len); 874 875 for (i = 0; i < last_word - first_word + 1; i++) 876 cpu_to_le16s(&eeprom_buff[i]); 877 878 ret_val = hw->eeprom.ops.write_buffer(hw, first_word, 879 last_word - first_word + 1, 880 eeprom_buff); 881 882 /* Update the checksum */ 883 if (ret_val == 0) 884 hw->eeprom.ops.update_checksum(hw); 885 886 err: 887 kfree(eeprom_buff); 888 return ret_val; 889 } 890 891 static void ixgbe_get_drvinfo(struct net_device *netdev, 892 struct ethtool_drvinfo *drvinfo) 893 { 894 struct ixgbe_adapter *adapter = netdev_priv(netdev); 895 u32 nvm_track_id; 896 897 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 898 strlcpy(drvinfo->version, ixgbe_driver_version, 899 sizeof(drvinfo->version)); 900 901 nvm_track_id = (adapter->eeprom_verh << 16) | 902 adapter->eeprom_verl; 903 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 904 nvm_track_id); 905 906 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 907 sizeof(drvinfo->bus_info)); 908 drvinfo->n_stats = IXGBE_STATS_LEN; 909 drvinfo->testinfo_len = IXGBE_TEST_LEN; 910 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 911 } 912 913 static void ixgbe_get_ringparam(struct net_device *netdev, 914 struct ethtool_ringparam *ring) 915 { 916 struct ixgbe_adapter *adapter = netdev_priv(netdev); 917 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 918 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 919 920 ring->rx_max_pending = IXGBE_MAX_RXD; 921 ring->tx_max_pending = IXGBE_MAX_TXD; 922 ring->rx_pending = rx_ring->count; 923 ring->tx_pending = tx_ring->count; 924 } 925 926 static int ixgbe_set_ringparam(struct net_device *netdev, 927 struct ethtool_ringparam *ring) 928 { 929 struct ixgbe_adapter *adapter = netdev_priv(netdev); 930 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 931 int i, err = 0; 932 u32 new_rx_count, new_tx_count; 933 bool need_update = false; 934 935 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 936 return -EINVAL; 937 938 new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); 939 new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); 940 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); 941 942 new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); 943 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); 944 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 945 946 if ((new_tx_count == adapter->tx_ring[0]->count) && 947 (new_rx_count == adapter->rx_ring[0]->count)) { 948 /* nothing to do */ 949 return 0; 950 } 951 952 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 953 usleep_range(1000, 2000); 954 955 if (!netif_running(adapter->netdev)) { 956 for (i = 0; i < adapter->num_tx_queues; i++) 957 adapter->tx_ring[i]->count = new_tx_count; 958 for (i = 0; i < adapter->num_rx_queues; i++) 959 adapter->rx_ring[i]->count = new_rx_count; 960 adapter->tx_ring_count = new_tx_count; 961 adapter->rx_ring_count = new_rx_count; 962 goto clear_reset; 963 } 964 965 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 966 if (!temp_tx_ring) { 967 err = -ENOMEM; 968 goto clear_reset; 969 } 970 971 if (new_tx_count != adapter->tx_ring_count) { 972 for (i = 0; i < adapter->num_tx_queues; i++) { 973 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 974 sizeof(struct ixgbe_ring)); 975 temp_tx_ring[i].count = new_tx_count; 976 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); 977 if (err) { 978 while (i) { 979 i--; 980 ixgbe_free_tx_resources(&temp_tx_ring[i]); 981 } 982 goto clear_reset; 983 } 984 } 985 need_update = true; 986 } 987 988 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 989 if (!temp_rx_ring) { 990 err = -ENOMEM; 991 goto err_setup; 992 } 993 994 if (new_rx_count != adapter->rx_ring_count) { 995 for (i = 0; i < adapter->num_rx_queues; i++) { 996 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 997 sizeof(struct ixgbe_ring)); 998 temp_rx_ring[i].count = new_rx_count; 999 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); 1000 if (err) { 1001 while (i) { 1002 i--; 1003 ixgbe_free_rx_resources(&temp_rx_ring[i]); 1004 } 1005 goto err_setup; 1006 } 1007 } 1008 need_update = true; 1009 } 1010 1011 /* if rings need to be updated, here's the place to do it in one shot */ 1012 if (need_update) { 1013 ixgbe_down(adapter); 1014 1015 /* tx */ 1016 if (new_tx_count != adapter->tx_ring_count) { 1017 for (i = 0; i < adapter->num_tx_queues; i++) { 1018 ixgbe_free_tx_resources(adapter->tx_ring[i]); 1019 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 1020 sizeof(struct ixgbe_ring)); 1021 } 1022 adapter->tx_ring_count = new_tx_count; 1023 } 1024 1025 /* rx */ 1026 if (new_rx_count != adapter->rx_ring_count) { 1027 for (i = 0; i < adapter->num_rx_queues; i++) { 1028 ixgbe_free_rx_resources(adapter->rx_ring[i]); 1029 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 1030 sizeof(struct ixgbe_ring)); 1031 } 1032 adapter->rx_ring_count = new_rx_count; 1033 } 1034 ixgbe_up(adapter); 1035 } 1036 1037 vfree(temp_rx_ring); 1038 err_setup: 1039 vfree(temp_tx_ring); 1040 clear_reset: 1041 clear_bit(__IXGBE_RESETTING, &adapter->state); 1042 return err; 1043 } 1044 1045 static int ixgbe_get_sset_count(struct net_device *netdev, int sset) 1046 { 1047 switch (sset) { 1048 case ETH_SS_TEST: 1049 return IXGBE_TEST_LEN; 1050 case ETH_SS_STATS: 1051 return IXGBE_STATS_LEN; 1052 default: 1053 return -EOPNOTSUPP; 1054 } 1055 } 1056 1057 static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1058 struct ethtool_stats *stats, u64 *data) 1059 { 1060 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1061 struct rtnl_link_stats64 temp; 1062 const struct rtnl_link_stats64 *net_stats; 1063 unsigned int start; 1064 struct ixgbe_ring *ring; 1065 int i, j; 1066 char *p = NULL; 1067 1068 ixgbe_update_stats(adapter); 1069 net_stats = dev_get_stats(netdev, &temp); 1070 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1071 switch (ixgbe_gstrings_stats[i].type) { 1072 case NETDEV_STATS: 1073 p = (char *) net_stats + 1074 ixgbe_gstrings_stats[i].stat_offset; 1075 break; 1076 case IXGBE_STATS: 1077 p = (char *) adapter + 1078 ixgbe_gstrings_stats[i].stat_offset; 1079 break; 1080 } 1081 1082 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1083 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1084 } 1085 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1086 ring = adapter->tx_ring[j]; 1087 if (!ring) { 1088 data[i] = 0; 1089 data[i+1] = 0; 1090 i += 2; 1091 continue; 1092 } 1093 1094 do { 1095 start = u64_stats_fetch_begin_bh(&ring->syncp); 1096 data[i] = ring->stats.packets; 1097 data[i+1] = ring->stats.bytes; 1098 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1099 i += 2; 1100 } 1101 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1102 ring = adapter->rx_ring[j]; 1103 if (!ring) { 1104 data[i] = 0; 1105 data[i+1] = 0; 1106 i += 2; 1107 continue; 1108 } 1109 1110 do { 1111 start = u64_stats_fetch_begin_bh(&ring->syncp); 1112 data[i] = ring->stats.packets; 1113 data[i+1] = ring->stats.bytes; 1114 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1115 i += 2; 1116 } 1117 1118 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1119 data[i++] = adapter->stats.pxontxc[j]; 1120 data[i++] = adapter->stats.pxofftxc[j]; 1121 } 1122 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1123 data[i++] = adapter->stats.pxonrxc[j]; 1124 data[i++] = adapter->stats.pxoffrxc[j]; 1125 } 1126 } 1127 1128 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1129 u8 *data) 1130 { 1131 char *p = (char *)data; 1132 int i; 1133 1134 switch (stringset) { 1135 case ETH_SS_TEST: 1136 memcpy(data, *ixgbe_gstrings_test, 1137 IXGBE_TEST_LEN * ETH_GSTRING_LEN); 1138 break; 1139 case ETH_SS_STATS: 1140 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1141 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 1142 ETH_GSTRING_LEN); 1143 p += ETH_GSTRING_LEN; 1144 } 1145 for (i = 0; i < netdev->num_tx_queues; i++) { 1146 sprintf(p, "tx_queue_%u_packets", i); 1147 p += ETH_GSTRING_LEN; 1148 sprintf(p, "tx_queue_%u_bytes", i); 1149 p += ETH_GSTRING_LEN; 1150 } 1151 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { 1152 sprintf(p, "rx_queue_%u_packets", i); 1153 p += ETH_GSTRING_LEN; 1154 sprintf(p, "rx_queue_%u_bytes", i); 1155 p += ETH_GSTRING_LEN; 1156 } 1157 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1158 sprintf(p, "tx_pb_%u_pxon", i); 1159 p += ETH_GSTRING_LEN; 1160 sprintf(p, "tx_pb_%u_pxoff", i); 1161 p += ETH_GSTRING_LEN; 1162 } 1163 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1164 sprintf(p, "rx_pb_%u_pxon", i); 1165 p += ETH_GSTRING_LEN; 1166 sprintf(p, "rx_pb_%u_pxoff", i); 1167 p += ETH_GSTRING_LEN; 1168 } 1169 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 1170 break; 1171 } 1172 } 1173 1174 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) 1175 { 1176 struct ixgbe_hw *hw = &adapter->hw; 1177 bool link_up; 1178 u32 link_speed = 0; 1179 *data = 0; 1180 1181 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 1182 if (link_up) 1183 return *data; 1184 else 1185 *data = 1; 1186 return *data; 1187 } 1188 1189 /* ethtool register test data */ 1190 struct ixgbe_reg_test { 1191 u16 reg; 1192 u8 array_len; 1193 u8 test_type; 1194 u32 mask; 1195 u32 write; 1196 }; 1197 1198 /* In the hardware, registers are laid out either singly, in arrays 1199 * spaced 0x40 bytes apart, or in contiguous tables. We assume 1200 * most tests take place on arrays or single registers (handled 1201 * as a single-element array) and special-case the tables. 1202 * Table tests are always pattern tests. 1203 * 1204 * We also make provision for some required setup steps by specifying 1205 * registers to be written without any read-back testing. 1206 */ 1207 1208 #define PATTERN_TEST 1 1209 #define SET_READ_TEST 2 1210 #define WRITE_NO_TEST 3 1211 #define TABLE32_TEST 4 1212 #define TABLE64_TEST_LO 5 1213 #define TABLE64_TEST_HI 6 1214 1215 /* default 82599 register test */ 1216 static const struct ixgbe_reg_test reg_test_82599[] = { 1217 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1218 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1219 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1220 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1221 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 1222 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1223 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1224 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1225 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1226 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1227 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1228 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1229 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1230 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1231 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, 1232 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, 1233 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1234 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, 1235 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1236 { 0, 0, 0, 0 } 1237 }; 1238 1239 /* default 82598 register test */ 1240 static const struct ixgbe_reg_test reg_test_82598[] = { 1241 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1242 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1243 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1244 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1245 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1246 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1247 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1248 /* Enable all four RX queues before testing. */ 1249 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1250 /* RDH is read-only for 82598, only test RDT. */ 1251 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1252 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1253 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1254 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1255 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, 1256 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1257 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1258 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1259 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, 1260 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, 1261 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1262 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, 1263 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1264 { 0, 0, 0, 0 } 1265 }; 1266 1267 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, 1268 u32 mask, u32 write) 1269 { 1270 u32 pat, val, before; 1271 static const u32 test_pattern[] = { 1272 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1273 1274 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { 1275 before = readl(adapter->hw.hw_addr + reg); 1276 writel((test_pattern[pat] & write), 1277 (adapter->hw.hw_addr + reg)); 1278 val = readl(adapter->hw.hw_addr + reg); 1279 if (val != (test_pattern[pat] & write & mask)) { 1280 e_err(drv, "pattern test reg %04X failed: got " 1281 "0x%08X expected 0x%08X\n", 1282 reg, val, (test_pattern[pat] & write & mask)); 1283 *data = reg; 1284 writel(before, adapter->hw.hw_addr + reg); 1285 return 1; 1286 } 1287 writel(before, adapter->hw.hw_addr + reg); 1288 } 1289 return 0; 1290 } 1291 1292 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, 1293 u32 mask, u32 write) 1294 { 1295 u32 val, before; 1296 before = readl(adapter->hw.hw_addr + reg); 1297 writel((write & mask), (adapter->hw.hw_addr + reg)); 1298 val = readl(adapter->hw.hw_addr + reg); 1299 if ((write & mask) != (val & mask)) { 1300 e_err(drv, "set/check reg %04X test failed: got 0x%08X " 1301 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 1302 *data = reg; 1303 writel(before, (adapter->hw.hw_addr + reg)); 1304 return 1; 1305 } 1306 writel(before, (adapter->hw.hw_addr + reg)); 1307 return 0; 1308 } 1309 1310 #define REG_PATTERN_TEST(reg, mask, write) \ 1311 do { \ 1312 if (reg_pattern_test(adapter, data, reg, mask, write)) \ 1313 return 1; \ 1314 } while (0) \ 1315 1316 1317 #define REG_SET_AND_CHECK(reg, mask, write) \ 1318 do { \ 1319 if (reg_set_and_check(adapter, data, reg, mask, write)) \ 1320 return 1; \ 1321 } while (0) \ 1322 1323 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1324 { 1325 const struct ixgbe_reg_test *test; 1326 u32 value, before, after; 1327 u32 i, toggle; 1328 1329 switch (adapter->hw.mac.type) { 1330 case ixgbe_mac_82598EB: 1331 toggle = 0x7FFFF3FF; 1332 test = reg_test_82598; 1333 break; 1334 case ixgbe_mac_82599EB: 1335 case ixgbe_mac_X540: 1336 toggle = 0x7FFFF30F; 1337 test = reg_test_82599; 1338 break; 1339 default: 1340 *data = 1; 1341 return 1; 1342 break; 1343 } 1344 1345 /* 1346 * Because the status register is such a special case, 1347 * we handle it separately from the rest of the register 1348 * tests. Some bits are read-only, some toggle, and some 1349 * are writeable on newer MACs. 1350 */ 1351 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); 1352 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); 1353 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); 1354 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; 1355 if (value != after) { 1356 e_err(drv, "failed STATUS register test got: 0x%08X " 1357 "expected: 0x%08X\n", after, value); 1358 *data = 1; 1359 return 1; 1360 } 1361 /* restore previous status */ 1362 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); 1363 1364 /* 1365 * Perform the remainder of the register test, looping through 1366 * the test table until we either fail or reach the null entry. 1367 */ 1368 while (test->reg) { 1369 for (i = 0; i < test->array_len; i++) { 1370 switch (test->test_type) { 1371 case PATTERN_TEST: 1372 REG_PATTERN_TEST(test->reg + (i * 0x40), 1373 test->mask, 1374 test->write); 1375 break; 1376 case SET_READ_TEST: 1377 REG_SET_AND_CHECK(test->reg + (i * 0x40), 1378 test->mask, 1379 test->write); 1380 break; 1381 case WRITE_NO_TEST: 1382 writel(test->write, 1383 (adapter->hw.hw_addr + test->reg) 1384 + (i * 0x40)); 1385 break; 1386 case TABLE32_TEST: 1387 REG_PATTERN_TEST(test->reg + (i * 4), 1388 test->mask, 1389 test->write); 1390 break; 1391 case TABLE64_TEST_LO: 1392 REG_PATTERN_TEST(test->reg + (i * 8), 1393 test->mask, 1394 test->write); 1395 break; 1396 case TABLE64_TEST_HI: 1397 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1398 test->mask, 1399 test->write); 1400 break; 1401 } 1402 } 1403 test++; 1404 } 1405 1406 *data = 0; 1407 return 0; 1408 } 1409 1410 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) 1411 { 1412 struct ixgbe_hw *hw = &adapter->hw; 1413 if (hw->eeprom.ops.validate_checksum(hw, NULL)) 1414 *data = 1; 1415 else 1416 *data = 0; 1417 return *data; 1418 } 1419 1420 static irqreturn_t ixgbe_test_intr(int irq, void *data) 1421 { 1422 struct net_device *netdev = (struct net_device *) data; 1423 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1424 1425 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); 1426 1427 return IRQ_HANDLED; 1428 } 1429 1430 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) 1431 { 1432 struct net_device *netdev = adapter->netdev; 1433 u32 mask, i = 0, shared_int = true; 1434 u32 irq = adapter->pdev->irq; 1435 1436 *data = 0; 1437 1438 /* Hook up test interrupt handler just for this test */ 1439 if (adapter->msix_entries) { 1440 /* NOTE: we don't test MSI-X interrupts here, yet */ 1441 return 0; 1442 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1443 shared_int = false; 1444 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, 1445 netdev)) { 1446 *data = 1; 1447 return -1; 1448 } 1449 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1450 netdev->name, netdev)) { 1451 shared_int = false; 1452 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1453 netdev->name, netdev)) { 1454 *data = 1; 1455 return -1; 1456 } 1457 e_info(hw, "testing %s interrupt\n", shared_int ? 1458 "shared" : "unshared"); 1459 1460 /* Disable all the interrupts */ 1461 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1462 IXGBE_WRITE_FLUSH(&adapter->hw); 1463 usleep_range(10000, 20000); 1464 1465 /* Test each interrupt */ 1466 for (; i < 10; i++) { 1467 /* Interrupt to test */ 1468 mask = 1 << i; 1469 1470 if (!shared_int) { 1471 /* 1472 * Disable the interrupts to be reported in 1473 * the cause register and then force the same 1474 * interrupt and see if one gets posted. If 1475 * an interrupt was posted to the bus, the 1476 * test failed. 1477 */ 1478 adapter->test_icr = 0; 1479 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1480 ~mask & 0x00007FFF); 1481 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1482 ~mask & 0x00007FFF); 1483 IXGBE_WRITE_FLUSH(&adapter->hw); 1484 usleep_range(10000, 20000); 1485 1486 if (adapter->test_icr & mask) { 1487 *data = 3; 1488 break; 1489 } 1490 } 1491 1492 /* 1493 * Enable the interrupt to be reported in the cause 1494 * register and then force the same interrupt and see 1495 * if one gets posted. If an interrupt was not posted 1496 * to the bus, the test failed. 1497 */ 1498 adapter->test_icr = 0; 1499 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1500 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1501 IXGBE_WRITE_FLUSH(&adapter->hw); 1502 usleep_range(10000, 20000); 1503 1504 if (!(adapter->test_icr &mask)) { 1505 *data = 4; 1506 break; 1507 } 1508 1509 if (!shared_int) { 1510 /* 1511 * Disable the other interrupts to be reported in 1512 * the cause register and then force the other 1513 * interrupts and see if any get posted. If 1514 * an interrupt was posted to the bus, the 1515 * test failed. 1516 */ 1517 adapter->test_icr = 0; 1518 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1519 ~mask & 0x00007FFF); 1520 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1521 ~mask & 0x00007FFF); 1522 IXGBE_WRITE_FLUSH(&adapter->hw); 1523 usleep_range(10000, 20000); 1524 1525 if (adapter->test_icr) { 1526 *data = 5; 1527 break; 1528 } 1529 } 1530 } 1531 1532 /* Disable all the interrupts */ 1533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1534 IXGBE_WRITE_FLUSH(&adapter->hw); 1535 usleep_range(10000, 20000); 1536 1537 /* Unhook test interrupt handler */ 1538 free_irq(irq, netdev); 1539 1540 return *data; 1541 } 1542 1543 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) 1544 { 1545 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1546 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1547 struct ixgbe_hw *hw = &adapter->hw; 1548 u32 reg_ctl; 1549 1550 /* shut down the DMA engines now so they can be reinitialized later */ 1551 1552 /* first Rx */ 1553 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1554 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1555 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1556 ixgbe_disable_rx_queue(adapter, rx_ring); 1557 1558 /* now Tx */ 1559 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1560 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1561 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1562 1563 switch (hw->mac.type) { 1564 case ixgbe_mac_82599EB: 1565 case ixgbe_mac_X540: 1566 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1567 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1568 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1569 break; 1570 default: 1571 break; 1572 } 1573 1574 ixgbe_reset(adapter); 1575 1576 ixgbe_free_tx_resources(&adapter->test_tx_ring); 1577 ixgbe_free_rx_resources(&adapter->test_rx_ring); 1578 } 1579 1580 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1581 { 1582 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1583 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1584 u32 rctl, reg_data; 1585 int ret_val; 1586 int err; 1587 1588 /* Setup Tx descriptor ring and Tx buffers */ 1589 tx_ring->count = IXGBE_DEFAULT_TXD; 1590 tx_ring->queue_index = 0; 1591 tx_ring->dev = &adapter->pdev->dev; 1592 tx_ring->netdev = adapter->netdev; 1593 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1594 tx_ring->numa_node = adapter->node; 1595 1596 err = ixgbe_setup_tx_resources(tx_ring); 1597 if (err) 1598 return 1; 1599 1600 switch (adapter->hw.mac.type) { 1601 case ixgbe_mac_82599EB: 1602 case ixgbe_mac_X540: 1603 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1604 reg_data |= IXGBE_DMATXCTL_TE; 1605 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1606 break; 1607 default: 1608 break; 1609 } 1610 1611 ixgbe_configure_tx_ring(adapter, tx_ring); 1612 1613 /* Setup Rx Descriptor ring and Rx buffers */ 1614 rx_ring->count = IXGBE_DEFAULT_RXD; 1615 rx_ring->queue_index = 0; 1616 rx_ring->dev = &adapter->pdev->dev; 1617 rx_ring->netdev = adapter->netdev; 1618 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1619 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; 1620 rx_ring->numa_node = adapter->node; 1621 1622 err = ixgbe_setup_rx_resources(rx_ring); 1623 if (err) { 1624 ret_val = 4; 1625 goto err_nomem; 1626 } 1627 1628 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1629 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1630 1631 ixgbe_configure_rx_ring(adapter, rx_ring); 1632 1633 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1634 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1635 1636 return 0; 1637 1638 err_nomem: 1639 ixgbe_free_desc_rings(adapter); 1640 return ret_val; 1641 } 1642 1643 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) 1644 { 1645 struct ixgbe_hw *hw = &adapter->hw; 1646 u32 reg_data; 1647 1648 /* X540 needs to set the MACC.FLU bit to force link up */ 1649 if (adapter->hw.mac.type == ixgbe_mac_X540) { 1650 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); 1651 reg_data |= IXGBE_MACC_FLU; 1652 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); 1653 } 1654 1655 /* right now we only support MAC loopback in the driver */ 1656 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1657 /* Setup MAC loopback */ 1658 reg_data |= IXGBE_HLREG0_LPBK; 1659 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); 1660 1661 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1662 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; 1663 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); 1664 1665 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1666 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1667 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1668 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); 1669 IXGBE_WRITE_FLUSH(hw); 1670 usleep_range(10000, 20000); 1671 1672 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1673 if (hw->mac.type == ixgbe_mac_82598EB) { 1674 u8 atlas; 1675 1676 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); 1677 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 1678 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); 1679 1680 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); 1681 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 1682 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); 1683 1684 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); 1685 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 1686 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); 1687 1688 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); 1689 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 1690 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); 1691 } 1692 1693 return 0; 1694 } 1695 1696 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) 1697 { 1698 u32 reg_data; 1699 1700 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1701 reg_data &= ~IXGBE_HLREG0_LPBK; 1702 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1703 } 1704 1705 static void ixgbe_create_lbtest_frame(struct sk_buff *skb, 1706 unsigned int frame_size) 1707 { 1708 memset(skb->data, 0xFF, frame_size); 1709 frame_size &= ~1; 1710 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1711 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1712 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1713 } 1714 1715 static int ixgbe_check_lbtest_frame(struct sk_buff *skb, 1716 unsigned int frame_size) 1717 { 1718 frame_size &= ~1; 1719 if (*(skb->data + 3) == 0xFF) { 1720 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1721 (*(skb->data + frame_size / 2 + 12) == 0xAF)) { 1722 return 0; 1723 } 1724 } 1725 return 13; 1726 } 1727 1728 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, 1729 struct ixgbe_ring *tx_ring, 1730 unsigned int size) 1731 { 1732 union ixgbe_adv_rx_desc *rx_desc; 1733 struct ixgbe_rx_buffer *rx_buffer_info; 1734 struct ixgbe_tx_buffer *tx_buffer_info; 1735 const int bufsz = rx_ring->rx_buf_len; 1736 u32 staterr; 1737 u16 rx_ntc, tx_ntc, count = 0; 1738 1739 /* initialize next to clean and descriptor values */ 1740 rx_ntc = rx_ring->next_to_clean; 1741 tx_ntc = tx_ring->next_to_clean; 1742 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); 1743 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1744 1745 while (staterr & IXGBE_RXD_STAT_DD) { 1746 /* check Rx buffer */ 1747 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1748 1749 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ 1750 dma_unmap_single(rx_ring->dev, 1751 rx_buffer_info->dma, 1752 bufsz, 1753 DMA_FROM_DEVICE); 1754 rx_buffer_info->dma = 0; 1755 1756 /* verify contents of skb */ 1757 if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size)) 1758 count++; 1759 1760 /* unmap buffer on Tx side */ 1761 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1762 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1763 1764 /* increment Rx/Tx next to clean counters */ 1765 rx_ntc++; 1766 if (rx_ntc == rx_ring->count) 1767 rx_ntc = 0; 1768 tx_ntc++; 1769 if (tx_ntc == tx_ring->count) 1770 tx_ntc = 0; 1771 1772 /* fetch next descriptor */ 1773 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); 1774 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1775 } 1776 1777 /* re-map buffers to ring, store next to clean values */ 1778 ixgbe_alloc_rx_buffers(rx_ring, count); 1779 rx_ring->next_to_clean = rx_ntc; 1780 tx_ring->next_to_clean = tx_ntc; 1781 1782 return count; 1783 } 1784 1785 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1786 { 1787 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1788 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1789 int i, j, lc, good_cnt, ret_val = 0; 1790 unsigned int size = 1024; 1791 netdev_tx_t tx_ret_val; 1792 struct sk_buff *skb; 1793 1794 /* allocate test skb */ 1795 skb = alloc_skb(size, GFP_KERNEL); 1796 if (!skb) 1797 return 11; 1798 1799 /* place data into test skb */ 1800 ixgbe_create_lbtest_frame(skb, size); 1801 skb_put(skb, size); 1802 1803 /* 1804 * Calculate the loop count based on the largest descriptor ring 1805 * The idea is to wrap the largest ring a number of times using 64 1806 * send/receive pairs during each loop 1807 */ 1808 1809 if (rx_ring->count <= tx_ring->count) 1810 lc = ((tx_ring->count / 64) * 2) + 1; 1811 else 1812 lc = ((rx_ring->count / 64) * 2) + 1; 1813 1814 for (j = 0; j <= lc; j++) { 1815 /* reset count of good packets */ 1816 good_cnt = 0; 1817 1818 /* place 64 packets on the transmit queue*/ 1819 for (i = 0; i < 64; i++) { 1820 skb_get(skb); 1821 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1822 adapter, 1823 tx_ring); 1824 if (tx_ret_val == NETDEV_TX_OK) 1825 good_cnt++; 1826 } 1827 1828 if (good_cnt != 64) { 1829 ret_val = 12; 1830 break; 1831 } 1832 1833 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1834 msleep(200); 1835 1836 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); 1837 if (good_cnt != 64) { 1838 ret_val = 13; 1839 break; 1840 } 1841 } 1842 1843 /* free the original skb */ 1844 kfree_skb(skb); 1845 1846 return ret_val; 1847 } 1848 1849 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) 1850 { 1851 *data = ixgbe_setup_desc_rings(adapter); 1852 if (*data) 1853 goto out; 1854 *data = ixgbe_setup_loopback_test(adapter); 1855 if (*data) 1856 goto err_loopback; 1857 *data = ixgbe_run_loopback_test(adapter); 1858 ixgbe_loopback_cleanup(adapter); 1859 1860 err_loopback: 1861 ixgbe_free_desc_rings(adapter); 1862 out: 1863 return *data; 1864 } 1865 1866 static void ixgbe_diag_test(struct net_device *netdev, 1867 struct ethtool_test *eth_test, u64 *data) 1868 { 1869 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1870 bool if_running = netif_running(netdev); 1871 1872 set_bit(__IXGBE_TESTING, &adapter->state); 1873 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1874 /* Offline tests */ 1875 1876 e_info(hw, "offline testing starting\n"); 1877 1878 /* Link test performed before hardware reset so autoneg doesn't 1879 * interfere with test result */ 1880 if (ixgbe_link_test(adapter, &data[4])) 1881 eth_test->flags |= ETH_TEST_FL_FAILED; 1882 1883 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 1884 int i; 1885 for (i = 0; i < adapter->num_vfs; i++) { 1886 if (adapter->vfinfo[i].clear_to_send) { 1887 netdev_warn(netdev, "%s", 1888 "offline diagnostic is not " 1889 "supported when VFs are " 1890 "present\n"); 1891 data[0] = 1; 1892 data[1] = 1; 1893 data[2] = 1; 1894 data[3] = 1; 1895 eth_test->flags |= ETH_TEST_FL_FAILED; 1896 clear_bit(__IXGBE_TESTING, 1897 &adapter->state); 1898 goto skip_ol_tests; 1899 } 1900 } 1901 } 1902 1903 if (if_running) 1904 /* indicate we're in test mode */ 1905 dev_close(netdev); 1906 else 1907 ixgbe_reset(adapter); 1908 1909 e_info(hw, "register testing starting\n"); 1910 if (ixgbe_reg_test(adapter, &data[0])) 1911 eth_test->flags |= ETH_TEST_FL_FAILED; 1912 1913 ixgbe_reset(adapter); 1914 e_info(hw, "eeprom testing starting\n"); 1915 if (ixgbe_eeprom_test(adapter, &data[1])) 1916 eth_test->flags |= ETH_TEST_FL_FAILED; 1917 1918 ixgbe_reset(adapter); 1919 e_info(hw, "interrupt testing starting\n"); 1920 if (ixgbe_intr_test(adapter, &data[2])) 1921 eth_test->flags |= ETH_TEST_FL_FAILED; 1922 1923 /* If SRIOV or VMDq is enabled then skip MAC 1924 * loopback diagnostic. */ 1925 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 1926 IXGBE_FLAG_VMDQ_ENABLED)) { 1927 e_info(hw, "Skip MAC loopback diagnostic in VT " 1928 "mode\n"); 1929 data[3] = 0; 1930 goto skip_loopback; 1931 } 1932 1933 ixgbe_reset(adapter); 1934 e_info(hw, "loopback testing starting\n"); 1935 if (ixgbe_loopback_test(adapter, &data[3])) 1936 eth_test->flags |= ETH_TEST_FL_FAILED; 1937 1938 skip_loopback: 1939 ixgbe_reset(adapter); 1940 1941 clear_bit(__IXGBE_TESTING, &adapter->state); 1942 if (if_running) 1943 dev_open(netdev); 1944 } else { 1945 e_info(hw, "online testing starting\n"); 1946 /* Online tests */ 1947 if (ixgbe_link_test(adapter, &data[4])) 1948 eth_test->flags |= ETH_TEST_FL_FAILED; 1949 1950 /* Online tests aren't run; pass by default */ 1951 data[0] = 0; 1952 data[1] = 0; 1953 data[2] = 0; 1954 data[3] = 0; 1955 1956 clear_bit(__IXGBE_TESTING, &adapter->state); 1957 } 1958 skip_ol_tests: 1959 msleep_interruptible(4 * 1000); 1960 } 1961 1962 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 1963 struct ethtool_wolinfo *wol) 1964 { 1965 struct ixgbe_hw *hw = &adapter->hw; 1966 int retval = 1; 1967 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; 1968 1969 /* WOL not supported except for the following */ 1970 switch(hw->device_id) { 1971 case IXGBE_DEV_ID_82599_SFP: 1972 /* Only these subdevices could supports WOL */ 1973 switch (hw->subsystem_device_id) { 1974 case IXGBE_SUBDEV_ID_82599_560FLR: 1975 /* only support first port */ 1976 if (hw->bus.func != 0) { 1977 wol->supported = 0; 1978 break; 1979 } 1980 case IXGBE_SUBDEV_ID_82599_SFP: 1981 retval = 0; 1982 break; 1983 default: 1984 wol->supported = 0; 1985 break; 1986 } 1987 break; 1988 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 1989 /* All except this subdevice support WOL */ 1990 if (hw->subsystem_device_id == 1991 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { 1992 wol->supported = 0; 1993 break; 1994 } 1995 retval = 0; 1996 break; 1997 case IXGBE_DEV_ID_82599_KX4: 1998 retval = 0; 1999 break; 2000 case IXGBE_DEV_ID_X540T: 2001 /* check eeprom to see if enabled wol */ 2002 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 2003 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && 2004 (hw->bus.func == 0))) { 2005 retval = 0; 2006 break; 2007 } 2008 2009 /* All others not supported */ 2010 wol->supported = 0; 2011 break; 2012 default: 2013 wol->supported = 0; 2014 } 2015 2016 return retval; 2017 } 2018 2019 static void ixgbe_get_wol(struct net_device *netdev, 2020 struct ethtool_wolinfo *wol) 2021 { 2022 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2023 2024 wol->supported = WAKE_UCAST | WAKE_MCAST | 2025 WAKE_BCAST | WAKE_MAGIC; 2026 wol->wolopts = 0; 2027 2028 if (ixgbe_wol_exclusion(adapter, wol) || 2029 !device_can_wakeup(&adapter->pdev->dev)) 2030 return; 2031 2032 if (adapter->wol & IXGBE_WUFC_EX) 2033 wol->wolopts |= WAKE_UCAST; 2034 if (adapter->wol & IXGBE_WUFC_MC) 2035 wol->wolopts |= WAKE_MCAST; 2036 if (adapter->wol & IXGBE_WUFC_BC) 2037 wol->wolopts |= WAKE_BCAST; 2038 if (adapter->wol & IXGBE_WUFC_MAG) 2039 wol->wolopts |= WAKE_MAGIC; 2040 } 2041 2042 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2043 { 2044 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2045 2046 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 2047 return -EOPNOTSUPP; 2048 2049 if (ixgbe_wol_exclusion(adapter, wol)) 2050 return wol->wolopts ? -EOPNOTSUPP : 0; 2051 2052 adapter->wol = 0; 2053 2054 if (wol->wolopts & WAKE_UCAST) 2055 adapter->wol |= IXGBE_WUFC_EX; 2056 if (wol->wolopts & WAKE_MCAST) 2057 adapter->wol |= IXGBE_WUFC_MC; 2058 if (wol->wolopts & WAKE_BCAST) 2059 adapter->wol |= IXGBE_WUFC_BC; 2060 if (wol->wolopts & WAKE_MAGIC) 2061 adapter->wol |= IXGBE_WUFC_MAG; 2062 2063 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 2064 2065 return 0; 2066 } 2067 2068 static int ixgbe_nway_reset(struct net_device *netdev) 2069 { 2070 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2071 2072 if (netif_running(netdev)) 2073 ixgbe_reinit_locked(adapter); 2074 2075 return 0; 2076 } 2077 2078 static int ixgbe_set_phys_id(struct net_device *netdev, 2079 enum ethtool_phys_id_state state) 2080 { 2081 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2082 struct ixgbe_hw *hw = &adapter->hw; 2083 2084 switch (state) { 2085 case ETHTOOL_ID_ACTIVE: 2086 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2087 return 2; 2088 2089 case ETHTOOL_ID_ON: 2090 hw->mac.ops.led_on(hw, IXGBE_LED_ON); 2091 break; 2092 2093 case ETHTOOL_ID_OFF: 2094 hw->mac.ops.led_off(hw, IXGBE_LED_ON); 2095 break; 2096 2097 case ETHTOOL_ID_INACTIVE: 2098 /* Restore LED settings */ 2099 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); 2100 break; 2101 } 2102 2103 return 0; 2104 } 2105 2106 static int ixgbe_get_coalesce(struct net_device *netdev, 2107 struct ethtool_coalesce *ec) 2108 { 2109 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2110 2111 ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; 2112 2113 /* only valid if in constant ITR mode */ 2114 if (adapter->rx_itr_setting <= 1) 2115 ec->rx_coalesce_usecs = adapter->rx_itr_setting; 2116 else 2117 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 2118 2119 /* if in mixed tx/rx queues per vector mode, report only rx settings */ 2120 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2121 return 0; 2122 2123 /* only valid if in constant ITR mode */ 2124 if (adapter->tx_itr_setting <= 1) 2125 ec->tx_coalesce_usecs = adapter->tx_itr_setting; 2126 else 2127 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; 2128 2129 return 0; 2130 } 2131 2132 /* 2133 * this function must be called before setting the new value of 2134 * rx_itr_setting 2135 */ 2136 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, 2137 struct ethtool_coalesce *ec) 2138 { 2139 struct net_device *netdev = adapter->netdev; 2140 2141 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 2142 return false; 2143 2144 /* if interrupt rate is too high then disable RSC */ 2145 if (ec->rx_coalesce_usecs != 1 && 2146 ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) { 2147 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2148 e_info(probe, "rx-usecs set too low, disabling RSC\n"); 2149 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2150 return true; 2151 } 2152 } else { 2153 /* check the feature flag value and enable RSC if necessary */ 2154 if ((netdev->features & NETIF_F_LRO) && 2155 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2156 e_info(probe, "rx-usecs set to %d, re-enabling RSC\n", 2157 ec->rx_coalesce_usecs); 2158 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2159 return true; 2160 } 2161 } 2162 return false; 2163 } 2164 2165 static int ixgbe_set_coalesce(struct net_device *netdev, 2166 struct ethtool_coalesce *ec) 2167 { 2168 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2169 struct ixgbe_q_vector *q_vector; 2170 int i; 2171 int num_vectors; 2172 u16 tx_itr_param, rx_itr_param; 2173 bool need_reset = false; 2174 2175 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2176 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count 2177 && ec->tx_coalesce_usecs) 2178 return -EINVAL; 2179 2180 if (ec->tx_max_coalesced_frames_irq) 2181 adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; 2182 2183 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || 2184 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) 2185 return -EINVAL; 2186 2187 /* check the old value and enable RSC if necessary */ 2188 need_reset = ixgbe_update_rsc(adapter, ec); 2189 2190 if (ec->rx_coalesce_usecs > 1) 2191 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; 2192 else 2193 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2194 2195 if (adapter->rx_itr_setting == 1) 2196 rx_itr_param = IXGBE_20K_ITR; 2197 else 2198 rx_itr_param = adapter->rx_itr_setting; 2199 2200 if (ec->tx_coalesce_usecs > 1) 2201 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; 2202 else 2203 adapter->tx_itr_setting = ec->tx_coalesce_usecs; 2204 2205 if (adapter->tx_itr_setting == 1) 2206 tx_itr_param = IXGBE_10K_ITR; 2207 else 2208 tx_itr_param = adapter->tx_itr_setting; 2209 2210 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2211 num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2212 else 2213 num_vectors = 1; 2214 2215 for (i = 0; i < num_vectors; i++) { 2216 q_vector = adapter->q_vector[i]; 2217 q_vector->tx.work_limit = adapter->tx_work_limit; 2218 if (q_vector->tx.count && !q_vector->rx.count) 2219 /* tx only */ 2220 q_vector->itr = tx_itr_param; 2221 else 2222 /* rx only or mixed */ 2223 q_vector->itr = rx_itr_param; 2224 ixgbe_write_eitr(q_vector); 2225 } 2226 2227 /* 2228 * do reset here at the end to make sure EITR==0 case is handled 2229 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings 2230 * also locks in RSC enable/disable which requires reset 2231 */ 2232 if (need_reset) 2233 ixgbe_do_reset(netdev); 2234 2235 return 0; 2236 } 2237 2238 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2239 struct ethtool_rxnfc *cmd) 2240 { 2241 union ixgbe_atr_input *mask = &adapter->fdir_mask; 2242 struct ethtool_rx_flow_spec *fsp = 2243 (struct ethtool_rx_flow_spec *)&cmd->fs; 2244 struct hlist_node *node, *node2; 2245 struct ixgbe_fdir_filter *rule = NULL; 2246 2247 /* report total rule count */ 2248 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2249 2250 hlist_for_each_entry_safe(rule, node, node2, 2251 &adapter->fdir_filter_list, fdir_node) { 2252 if (fsp->location <= rule->sw_idx) 2253 break; 2254 } 2255 2256 if (!rule || fsp->location != rule->sw_idx) 2257 return -EINVAL; 2258 2259 /* fill out the flow spec entry */ 2260 2261 /* set flow type field */ 2262 switch (rule->filter.formatted.flow_type) { 2263 case IXGBE_ATR_FLOW_TYPE_TCPV4: 2264 fsp->flow_type = TCP_V4_FLOW; 2265 break; 2266 case IXGBE_ATR_FLOW_TYPE_UDPV4: 2267 fsp->flow_type = UDP_V4_FLOW; 2268 break; 2269 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 2270 fsp->flow_type = SCTP_V4_FLOW; 2271 break; 2272 case IXGBE_ATR_FLOW_TYPE_IPV4: 2273 fsp->flow_type = IP_USER_FLOW; 2274 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 2275 fsp->h_u.usr_ip4_spec.proto = 0; 2276 fsp->m_u.usr_ip4_spec.proto = 0; 2277 break; 2278 default: 2279 return -EINVAL; 2280 } 2281 2282 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; 2283 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; 2284 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; 2285 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; 2286 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; 2287 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; 2288 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; 2289 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; 2290 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; 2291 fsp->m_ext.vlan_tci = mask->formatted.vlan_id; 2292 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; 2293 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; 2294 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); 2295 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); 2296 fsp->flow_type |= FLOW_EXT; 2297 2298 /* record action */ 2299 if (rule->action == IXGBE_FDIR_DROP_QUEUE) 2300 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2301 else 2302 fsp->ring_cookie = rule->action; 2303 2304 return 0; 2305 } 2306 2307 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, 2308 struct ethtool_rxnfc *cmd, 2309 u32 *rule_locs) 2310 { 2311 struct hlist_node *node, *node2; 2312 struct ixgbe_fdir_filter *rule; 2313 int cnt = 0; 2314 2315 /* report total rule count */ 2316 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2317 2318 hlist_for_each_entry_safe(rule, node, node2, 2319 &adapter->fdir_filter_list, fdir_node) { 2320 if (cnt == cmd->rule_cnt) 2321 return -EMSGSIZE; 2322 rule_locs[cnt] = rule->sw_idx; 2323 cnt++; 2324 } 2325 2326 cmd->rule_cnt = cnt; 2327 2328 return 0; 2329 } 2330 2331 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2332 u32 *rule_locs) 2333 { 2334 struct ixgbe_adapter *adapter = netdev_priv(dev); 2335 int ret = -EOPNOTSUPP; 2336 2337 switch (cmd->cmd) { 2338 case ETHTOOL_GRXRINGS: 2339 cmd->data = adapter->num_rx_queues; 2340 ret = 0; 2341 break; 2342 case ETHTOOL_GRXCLSRLCNT: 2343 cmd->rule_cnt = adapter->fdir_filter_count; 2344 ret = 0; 2345 break; 2346 case ETHTOOL_GRXCLSRULE: 2347 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); 2348 break; 2349 case ETHTOOL_GRXCLSRLALL: 2350 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); 2351 break; 2352 default: 2353 break; 2354 } 2355 2356 return ret; 2357 } 2358 2359 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2360 struct ixgbe_fdir_filter *input, 2361 u16 sw_idx) 2362 { 2363 struct ixgbe_hw *hw = &adapter->hw; 2364 struct hlist_node *node, *node2, *parent; 2365 struct ixgbe_fdir_filter *rule; 2366 int err = -EINVAL; 2367 2368 parent = NULL; 2369 rule = NULL; 2370 2371 hlist_for_each_entry_safe(rule, node, node2, 2372 &adapter->fdir_filter_list, fdir_node) { 2373 /* hash found, or no matching entry */ 2374 if (rule->sw_idx >= sw_idx) 2375 break; 2376 parent = node; 2377 } 2378 2379 /* if there is an old rule occupying our place remove it */ 2380 if (rule && (rule->sw_idx == sw_idx)) { 2381 if (!input || (rule->filter.formatted.bkt_hash != 2382 input->filter.formatted.bkt_hash)) { 2383 err = ixgbe_fdir_erase_perfect_filter_82599(hw, 2384 &rule->filter, 2385 sw_idx); 2386 } 2387 2388 hlist_del(&rule->fdir_node); 2389 kfree(rule); 2390 adapter->fdir_filter_count--; 2391 } 2392 2393 /* 2394 * If no input this was a delete, err should be 0 if a rule was 2395 * successfully found and removed from the list else -EINVAL 2396 */ 2397 if (!input) 2398 return err; 2399 2400 /* initialize node and set software index */ 2401 INIT_HLIST_NODE(&input->fdir_node); 2402 2403 /* add filter to the list */ 2404 if (parent) 2405 hlist_add_after(parent, &input->fdir_node); 2406 else 2407 hlist_add_head(&input->fdir_node, 2408 &adapter->fdir_filter_list); 2409 2410 /* update counts */ 2411 adapter->fdir_filter_count++; 2412 2413 return 0; 2414 } 2415 2416 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, 2417 u8 *flow_type) 2418 { 2419 switch (fsp->flow_type & ~FLOW_EXT) { 2420 case TCP_V4_FLOW: 2421 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2422 break; 2423 case UDP_V4_FLOW: 2424 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2425 break; 2426 case SCTP_V4_FLOW: 2427 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2428 break; 2429 case IP_USER_FLOW: 2430 switch (fsp->h_u.usr_ip4_spec.proto) { 2431 case IPPROTO_TCP: 2432 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2433 break; 2434 case IPPROTO_UDP: 2435 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2436 break; 2437 case IPPROTO_SCTP: 2438 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2439 break; 2440 case 0: 2441 if (!fsp->m_u.usr_ip4_spec.proto) { 2442 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; 2443 break; 2444 } 2445 default: 2446 return 0; 2447 } 2448 break; 2449 default: 2450 return 0; 2451 } 2452 2453 return 1; 2454 } 2455 2456 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2457 struct ethtool_rxnfc *cmd) 2458 { 2459 struct ethtool_rx_flow_spec *fsp = 2460 (struct ethtool_rx_flow_spec *)&cmd->fs; 2461 struct ixgbe_hw *hw = &adapter->hw; 2462 struct ixgbe_fdir_filter *input; 2463 union ixgbe_atr_input mask; 2464 int err; 2465 2466 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 2467 return -EOPNOTSUPP; 2468 2469 /* 2470 * Don't allow programming if the action is a queue greater than 2471 * the number of online Rx queues. 2472 */ 2473 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && 2474 (fsp->ring_cookie >= adapter->num_rx_queues)) 2475 return -EINVAL; 2476 2477 /* Don't allow indexes to exist outside of available space */ 2478 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { 2479 e_err(drv, "Location out of range\n"); 2480 return -EINVAL; 2481 } 2482 2483 input = kzalloc(sizeof(*input), GFP_ATOMIC); 2484 if (!input) 2485 return -ENOMEM; 2486 2487 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 2488 2489 /* set SW index */ 2490 input->sw_idx = fsp->location; 2491 2492 /* record flow type */ 2493 if (!ixgbe_flowspec_to_flow_type(fsp, 2494 &input->filter.formatted.flow_type)) { 2495 e_err(drv, "Unrecognized flow type\n"); 2496 goto err_out; 2497 } 2498 2499 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 2500 IXGBE_ATR_L4TYPE_MASK; 2501 2502 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 2503 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 2504 2505 /* Copy input into formatted structures */ 2506 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2507 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; 2508 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2509 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; 2510 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; 2511 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; 2512 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; 2513 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; 2514 2515 if (fsp->flow_type & FLOW_EXT) { 2516 input->filter.formatted.vm_pool = 2517 (unsigned char)ntohl(fsp->h_ext.data[1]); 2518 mask.formatted.vm_pool = 2519 (unsigned char)ntohl(fsp->m_ext.data[1]); 2520 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; 2521 mask.formatted.vlan_id = fsp->m_ext.vlan_tci; 2522 input->filter.formatted.flex_bytes = 2523 fsp->h_ext.vlan_etype; 2524 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; 2525 } 2526 2527 /* determine if we need to drop or route the packet */ 2528 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) 2529 input->action = IXGBE_FDIR_DROP_QUEUE; 2530 else 2531 input->action = fsp->ring_cookie; 2532 2533 spin_lock(&adapter->fdir_perfect_lock); 2534 2535 if (hlist_empty(&adapter->fdir_filter_list)) { 2536 /* save mask and program input mask into HW */ 2537 memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); 2538 err = ixgbe_fdir_set_input_mask_82599(hw, &mask); 2539 if (err) { 2540 e_err(drv, "Error writing mask\n"); 2541 goto err_out_w_lock; 2542 } 2543 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { 2544 e_err(drv, "Only one mask supported per port\n"); 2545 goto err_out_w_lock; 2546 } 2547 2548 /* apply mask and compute/store hash */ 2549 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); 2550 2551 /* program filters to filter memory */ 2552 err = ixgbe_fdir_write_perfect_filter_82599(hw, 2553 &input->filter, input->sw_idx, 2554 (input->action == IXGBE_FDIR_DROP_QUEUE) ? 2555 IXGBE_FDIR_DROP_QUEUE : 2556 adapter->rx_ring[input->action]->reg_idx); 2557 if (err) 2558 goto err_out_w_lock; 2559 2560 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 2561 2562 spin_unlock(&adapter->fdir_perfect_lock); 2563 2564 return err; 2565 err_out_w_lock: 2566 spin_unlock(&adapter->fdir_perfect_lock); 2567 err_out: 2568 kfree(input); 2569 return -EINVAL; 2570 } 2571 2572 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2573 struct ethtool_rxnfc *cmd) 2574 { 2575 struct ethtool_rx_flow_spec *fsp = 2576 (struct ethtool_rx_flow_spec *)&cmd->fs; 2577 int err; 2578 2579 spin_lock(&adapter->fdir_perfect_lock); 2580 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); 2581 spin_unlock(&adapter->fdir_perfect_lock); 2582 2583 return err; 2584 } 2585 2586 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 2587 { 2588 struct ixgbe_adapter *adapter = netdev_priv(dev); 2589 int ret = -EOPNOTSUPP; 2590 2591 switch (cmd->cmd) { 2592 case ETHTOOL_SRXCLSRLINS: 2593 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); 2594 break; 2595 case ETHTOOL_SRXCLSRLDEL: 2596 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); 2597 break; 2598 default: 2599 break; 2600 } 2601 2602 return ret; 2603 } 2604 2605 static const struct ethtool_ops ixgbe_ethtool_ops = { 2606 .get_settings = ixgbe_get_settings, 2607 .set_settings = ixgbe_set_settings, 2608 .get_drvinfo = ixgbe_get_drvinfo, 2609 .get_regs_len = ixgbe_get_regs_len, 2610 .get_regs = ixgbe_get_regs, 2611 .get_wol = ixgbe_get_wol, 2612 .set_wol = ixgbe_set_wol, 2613 .nway_reset = ixgbe_nway_reset, 2614 .get_link = ethtool_op_get_link, 2615 .get_eeprom_len = ixgbe_get_eeprom_len, 2616 .get_eeprom = ixgbe_get_eeprom, 2617 .set_eeprom = ixgbe_set_eeprom, 2618 .get_ringparam = ixgbe_get_ringparam, 2619 .set_ringparam = ixgbe_set_ringparam, 2620 .get_pauseparam = ixgbe_get_pauseparam, 2621 .set_pauseparam = ixgbe_set_pauseparam, 2622 .get_msglevel = ixgbe_get_msglevel, 2623 .set_msglevel = ixgbe_set_msglevel, 2624 .self_test = ixgbe_diag_test, 2625 .get_strings = ixgbe_get_strings, 2626 .set_phys_id = ixgbe_set_phys_id, 2627 .get_sset_count = ixgbe_get_sset_count, 2628 .get_ethtool_stats = ixgbe_get_ethtool_stats, 2629 .get_coalesce = ixgbe_get_coalesce, 2630 .set_coalesce = ixgbe_set_coalesce, 2631 .get_rxnfc = ixgbe_get_rxnfc, 2632 .set_rxnfc = ixgbe_set_rxnfc, 2633 }; 2634 2635 void ixgbe_set_ethtool_ops(struct net_device *netdev) 2636 { 2637 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); 2638 } 2639