1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2011 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* ethtool support for ixgbe */ 29 30 #include <linux/interrupt.h> 31 #include <linux/types.h> 32 #include <linux/module.h> 33 #include <linux/slab.h> 34 #include <linux/pci.h> 35 #include <linux/netdevice.h> 36 #include <linux/ethtool.h> 37 #include <linux/vmalloc.h> 38 #include <linux/uaccess.h> 39 40 #include "ixgbe.h" 41 42 43 #define IXGBE_ALL_RAR_ENTRIES 16 44 45 enum {NETDEV_STATS, IXGBE_STATS}; 46 47 struct ixgbe_stats { 48 char stat_string[ETH_GSTRING_LEN]; 49 int type; 50 int sizeof_stat; 51 int stat_offset; 52 }; 53 54 #define IXGBE_STAT(m) IXGBE_STATS, \ 55 sizeof(((struct ixgbe_adapter *)0)->m), \ 56 offsetof(struct ixgbe_adapter, m) 57 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ 58 sizeof(((struct rtnl_link_stats64 *)0)->m), \ 59 offsetof(struct rtnl_link_stats64, m) 60 61 static struct ixgbe_stats ixgbe_gstrings_stats[] = { 62 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, 63 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, 64 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, 65 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, 66 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 67 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 68 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 69 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, 70 {"lsc_int", IXGBE_STAT(lsc_int)}, 71 {"tx_busy", IXGBE_STAT(tx_busy)}, 72 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 73 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, 74 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, 75 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, 76 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, 77 {"multicast", IXGBE_NETDEV_STAT(multicast)}, 78 {"broadcast", IXGBE_STAT(stats.bprc)}, 79 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 80 {"collisions", IXGBE_NETDEV_STAT(collisions)}, 81 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, 82 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, 83 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, 84 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, 85 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 86 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 87 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 88 {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, 89 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, 90 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, 91 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, 92 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, 93 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, 94 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, 95 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 96 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 97 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 98 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, 99 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, 100 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, 101 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, 102 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, 103 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, 104 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 105 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 106 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 107 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, 108 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, 109 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, 110 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, 111 #ifdef IXGBE_FCOE 112 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 113 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 114 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, 115 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, 116 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, 117 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, 118 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, 119 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, 120 #endif /* IXGBE_FCOE */ 121 }; 122 123 #define IXGBE_QUEUE_STATS_LEN \ 124 ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ 125 ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ 126 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 127 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 128 #define IXGBE_PB_STATS_LEN ( \ 129 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ 130 IXGBE_FLAG_DCB_ENABLED) ? \ 131 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ 132 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ 133 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ 134 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 135 / sizeof(u64) : 0) 136 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 137 IXGBE_PB_STATS_LEN + \ 138 IXGBE_QUEUE_STATS_LEN) 139 140 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 141 "Register test (offline)", "Eeprom test (offline)", 142 "Interrupt test (offline)", "Loopback test (offline)", 143 "Link test (on/offline)" 144 }; 145 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 146 147 static int ixgbe_get_settings(struct net_device *netdev, 148 struct ethtool_cmd *ecmd) 149 { 150 struct ixgbe_adapter *adapter = netdev_priv(netdev); 151 struct ixgbe_hw *hw = &adapter->hw; 152 u32 link_speed = 0; 153 bool link_up; 154 155 ecmd->supported = SUPPORTED_10000baseT_Full; 156 ecmd->autoneg = AUTONEG_ENABLE; 157 ecmd->transceiver = XCVR_EXTERNAL; 158 if ((hw->phy.media_type == ixgbe_media_type_copper) || 159 (hw->phy.multispeed_fiber)) { 160 ecmd->supported |= (SUPPORTED_1000baseT_Full | 161 SUPPORTED_Autoneg); 162 163 switch (hw->mac.type) { 164 case ixgbe_mac_X540: 165 ecmd->supported |= SUPPORTED_100baseT_Full; 166 break; 167 default: 168 break; 169 } 170 171 ecmd->advertising = ADVERTISED_Autoneg; 172 if (hw->phy.autoneg_advertised) { 173 if (hw->phy.autoneg_advertised & 174 IXGBE_LINK_SPEED_100_FULL) 175 ecmd->advertising |= ADVERTISED_100baseT_Full; 176 if (hw->phy.autoneg_advertised & 177 IXGBE_LINK_SPEED_10GB_FULL) 178 ecmd->advertising |= ADVERTISED_10000baseT_Full; 179 if (hw->phy.autoneg_advertised & 180 IXGBE_LINK_SPEED_1GB_FULL) 181 ecmd->advertising |= ADVERTISED_1000baseT_Full; 182 } else { 183 /* 184 * Default advertised modes in case 185 * phy.autoneg_advertised isn't set. 186 */ 187 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 188 ADVERTISED_1000baseT_Full); 189 if (hw->mac.type == ixgbe_mac_X540) 190 ecmd->advertising |= ADVERTISED_100baseT_Full; 191 } 192 193 if (hw->phy.media_type == ixgbe_media_type_copper) { 194 ecmd->supported |= SUPPORTED_TP; 195 ecmd->advertising |= ADVERTISED_TP; 196 ecmd->port = PORT_TP; 197 } else { 198 ecmd->supported |= SUPPORTED_FIBRE; 199 ecmd->advertising |= ADVERTISED_FIBRE; 200 ecmd->port = PORT_FIBRE; 201 } 202 } else if (hw->phy.media_type == ixgbe_media_type_backplane) { 203 /* Set as FIBRE until SERDES defined in kernel */ 204 if (hw->device_id == IXGBE_DEV_ID_82598_BX) { 205 ecmd->supported = (SUPPORTED_1000baseT_Full | 206 SUPPORTED_FIBRE); 207 ecmd->advertising = (ADVERTISED_1000baseT_Full | 208 ADVERTISED_FIBRE); 209 ecmd->port = PORT_FIBRE; 210 ecmd->autoneg = AUTONEG_DISABLE; 211 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || 212 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { 213 ecmd->supported |= (SUPPORTED_1000baseT_Full | 214 SUPPORTED_Autoneg | 215 SUPPORTED_FIBRE); 216 ecmd->advertising = (ADVERTISED_10000baseT_Full | 217 ADVERTISED_1000baseT_Full | 218 ADVERTISED_Autoneg | 219 ADVERTISED_FIBRE); 220 ecmd->port = PORT_FIBRE; 221 } else { 222 ecmd->supported |= (SUPPORTED_1000baseT_Full | 223 SUPPORTED_FIBRE); 224 ecmd->advertising = (ADVERTISED_10000baseT_Full | 225 ADVERTISED_1000baseT_Full | 226 ADVERTISED_FIBRE); 227 ecmd->port = PORT_FIBRE; 228 } 229 } else { 230 ecmd->supported |= SUPPORTED_FIBRE; 231 ecmd->advertising = (ADVERTISED_10000baseT_Full | 232 ADVERTISED_FIBRE); 233 ecmd->port = PORT_FIBRE; 234 ecmd->autoneg = AUTONEG_DISABLE; 235 } 236 237 /* Get PHY type */ 238 switch (adapter->hw.phy.type) { 239 case ixgbe_phy_tn: 240 case ixgbe_phy_aq: 241 case ixgbe_phy_cu_unknown: 242 /* Copper 10G-BASET */ 243 ecmd->port = PORT_TP; 244 break; 245 case ixgbe_phy_qt: 246 ecmd->port = PORT_FIBRE; 247 break; 248 case ixgbe_phy_nl: 249 case ixgbe_phy_sfp_passive_tyco: 250 case ixgbe_phy_sfp_passive_unknown: 251 case ixgbe_phy_sfp_ftl: 252 case ixgbe_phy_sfp_avago: 253 case ixgbe_phy_sfp_intel: 254 case ixgbe_phy_sfp_unknown: 255 switch (adapter->hw.phy.sfp_type) { 256 /* SFP+ devices, further checking needed */ 257 case ixgbe_sfp_type_da_cu: 258 case ixgbe_sfp_type_da_cu_core0: 259 case ixgbe_sfp_type_da_cu_core1: 260 ecmd->port = PORT_DA; 261 break; 262 case ixgbe_sfp_type_sr: 263 case ixgbe_sfp_type_lr: 264 case ixgbe_sfp_type_srlr_core0: 265 case ixgbe_sfp_type_srlr_core1: 266 ecmd->port = PORT_FIBRE; 267 break; 268 case ixgbe_sfp_type_not_present: 269 ecmd->port = PORT_NONE; 270 break; 271 case ixgbe_sfp_type_1g_cu_core0: 272 case ixgbe_sfp_type_1g_cu_core1: 273 ecmd->port = PORT_TP; 274 ecmd->supported = SUPPORTED_TP; 275 ecmd->advertising = (ADVERTISED_1000baseT_Full | 276 ADVERTISED_TP); 277 break; 278 case ixgbe_sfp_type_unknown: 279 default: 280 ecmd->port = PORT_OTHER; 281 break; 282 } 283 break; 284 case ixgbe_phy_xaui: 285 ecmd->port = PORT_NONE; 286 break; 287 case ixgbe_phy_unknown: 288 case ixgbe_phy_generic: 289 case ixgbe_phy_sfp_unsupported: 290 default: 291 ecmd->port = PORT_OTHER; 292 break; 293 } 294 295 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 296 if (link_up) { 297 switch (link_speed) { 298 case IXGBE_LINK_SPEED_10GB_FULL: 299 ethtool_cmd_speed_set(ecmd, SPEED_10000); 300 break; 301 case IXGBE_LINK_SPEED_1GB_FULL: 302 ethtool_cmd_speed_set(ecmd, SPEED_1000); 303 break; 304 case IXGBE_LINK_SPEED_100_FULL: 305 ethtool_cmd_speed_set(ecmd, SPEED_100); 306 break; 307 default: 308 break; 309 } 310 ecmd->duplex = DUPLEX_FULL; 311 } else { 312 ethtool_cmd_speed_set(ecmd, -1); 313 ecmd->duplex = -1; 314 } 315 316 return 0; 317 } 318 319 static int ixgbe_set_settings(struct net_device *netdev, 320 struct ethtool_cmd *ecmd) 321 { 322 struct ixgbe_adapter *adapter = netdev_priv(netdev); 323 struct ixgbe_hw *hw = &adapter->hw; 324 u32 advertised, old; 325 s32 err = 0; 326 327 if ((hw->phy.media_type == ixgbe_media_type_copper) || 328 (hw->phy.multispeed_fiber)) { 329 /* 330 * this function does not support duplex forcing, but can 331 * limit the advertising of the adapter to the specified speed 332 */ 333 if (ecmd->autoneg == AUTONEG_DISABLE) 334 return -EINVAL; 335 336 if (ecmd->advertising & ~ecmd->supported) 337 return -EINVAL; 338 339 old = hw->phy.autoneg_advertised; 340 advertised = 0; 341 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 342 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 343 344 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 345 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 346 347 if (ecmd->advertising & ADVERTISED_100baseT_Full) 348 advertised |= IXGBE_LINK_SPEED_100_FULL; 349 350 if (old == advertised) 351 return err; 352 /* this sets the link speed and restarts auto-neg */ 353 hw->mac.autotry_restart = true; 354 err = hw->mac.ops.setup_link(hw, advertised, true, true); 355 if (err) { 356 e_info(probe, "setup link failed with code %d\n", err); 357 hw->mac.ops.setup_link(hw, old, true, true); 358 } 359 } else { 360 /* in this case we currently only support 10Gb/FULL */ 361 u32 speed = ethtool_cmd_speed(ecmd); 362 if ((ecmd->autoneg == AUTONEG_ENABLE) || 363 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 364 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 365 return -EINVAL; 366 } 367 368 return err; 369 } 370 371 static void ixgbe_get_pauseparam(struct net_device *netdev, 372 struct ethtool_pauseparam *pause) 373 { 374 struct ixgbe_adapter *adapter = netdev_priv(netdev); 375 struct ixgbe_hw *hw = &adapter->hw; 376 377 if (hw->fc.disable_fc_autoneg) 378 pause->autoneg = 0; 379 else 380 pause->autoneg = 1; 381 382 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 383 pause->rx_pause = 1; 384 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 385 pause->tx_pause = 1; 386 } else if (hw->fc.current_mode == ixgbe_fc_full) { 387 pause->rx_pause = 1; 388 pause->tx_pause = 1; 389 #ifdef CONFIG_DCB 390 } else if (hw->fc.current_mode == ixgbe_fc_pfc) { 391 pause->rx_pause = 0; 392 pause->tx_pause = 0; 393 #endif 394 } 395 } 396 397 static int ixgbe_set_pauseparam(struct net_device *netdev, 398 struct ethtool_pauseparam *pause) 399 { 400 struct ixgbe_adapter *adapter = netdev_priv(netdev); 401 struct ixgbe_hw *hw = &adapter->hw; 402 struct ixgbe_fc_info fc; 403 404 #ifdef CONFIG_DCB 405 if (adapter->dcb_cfg.pfc_mode_enable || 406 ((hw->mac.type == ixgbe_mac_82598EB) && 407 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) 408 return -EINVAL; 409 410 #endif 411 fc = hw->fc; 412 413 if (pause->autoneg != AUTONEG_ENABLE) 414 fc.disable_fc_autoneg = true; 415 else 416 fc.disable_fc_autoneg = false; 417 418 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 419 fc.requested_mode = ixgbe_fc_full; 420 else if (pause->rx_pause && !pause->tx_pause) 421 fc.requested_mode = ixgbe_fc_rx_pause; 422 else if (!pause->rx_pause && pause->tx_pause) 423 fc.requested_mode = ixgbe_fc_tx_pause; 424 else if (!pause->rx_pause && !pause->tx_pause) 425 fc.requested_mode = ixgbe_fc_none; 426 else 427 return -EINVAL; 428 429 #ifdef CONFIG_DCB 430 adapter->last_lfc_mode = fc.requested_mode; 431 #endif 432 433 /* if the thing changed then we'll update and use new autoneg */ 434 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { 435 hw->fc = fc; 436 if (netif_running(netdev)) 437 ixgbe_reinit_locked(adapter); 438 else 439 ixgbe_reset(adapter); 440 } 441 442 return 0; 443 } 444 445 static u32 ixgbe_get_msglevel(struct net_device *netdev) 446 { 447 struct ixgbe_adapter *adapter = netdev_priv(netdev); 448 return adapter->msg_enable; 449 } 450 451 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) 452 { 453 struct ixgbe_adapter *adapter = netdev_priv(netdev); 454 adapter->msg_enable = data; 455 } 456 457 static int ixgbe_get_regs_len(struct net_device *netdev) 458 { 459 #define IXGBE_REGS_LEN 1129 460 return IXGBE_REGS_LEN * sizeof(u32); 461 } 462 463 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 464 465 static void ixgbe_get_regs(struct net_device *netdev, 466 struct ethtool_regs *regs, void *p) 467 { 468 struct ixgbe_adapter *adapter = netdev_priv(netdev); 469 struct ixgbe_hw *hw = &adapter->hw; 470 u32 *regs_buff = p; 471 u8 i; 472 473 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); 474 475 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; 476 477 /* General Registers */ 478 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); 479 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); 480 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 481 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); 482 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); 483 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 484 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); 485 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); 486 487 /* NVM Register */ 488 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); 489 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); 490 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); 491 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); 492 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); 493 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); 494 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); 495 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); 496 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); 497 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 498 499 /* Interrupt */ 500 /* don't read EICR because it can clear interrupt causes, instead 501 * read EICS which is a shadow but doesn't clear EICR */ 502 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); 503 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 504 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 505 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 506 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); 507 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); 508 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); 509 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 510 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 511 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 512 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); 513 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 514 515 /* Flow Control */ 516 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); 517 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); 518 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 519 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 520 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 521 for (i = 0; i < 8; i++) { 522 switch (hw->mac.type) { 523 case ixgbe_mac_82598EB: 524 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 525 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 526 break; 527 case ixgbe_mac_82599EB: 528 case ixgbe_mac_X540: 529 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); 530 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 531 break; 532 default: 533 break; 534 } 535 } 536 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 537 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 538 539 /* Receive DMA */ 540 for (i = 0; i < 64; i++) 541 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); 542 for (i = 0; i < 64; i++) 543 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); 544 for (i = 0; i < 64; i++) 545 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); 546 for (i = 0; i < 64; i++) 547 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); 548 for (i = 0; i < 64; i++) 549 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); 550 for (i = 0; i < 64; i++) 551 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 552 for (i = 0; i < 16; i++) 553 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); 554 for (i = 0; i < 16; i++) 555 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 556 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 557 for (i = 0; i < 8; i++) 558 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 559 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 560 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); 561 562 /* Receive */ 563 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 564 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); 565 for (i = 0; i < 16; i++) 566 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 567 for (i = 0; i < 16; i++) 568 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 569 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); 570 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 571 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 572 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 573 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); 574 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 575 for (i = 0; i < 8; i++) 576 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); 577 for (i = 0; i < 8; i++) 578 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); 579 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); 580 581 /* Transmit */ 582 for (i = 0; i < 32; i++) 583 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); 584 for (i = 0; i < 32; i++) 585 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); 586 for (i = 0; i < 32; i++) 587 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); 588 for (i = 0; i < 32; i++) 589 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); 590 for (i = 0; i < 32; i++) 591 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); 592 for (i = 0; i < 32; i++) 593 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 594 for (i = 0; i < 32; i++) 595 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); 596 for (i = 0; i < 32; i++) 597 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); 598 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); 599 for (i = 0; i < 16; i++) 600 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 601 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); 602 for (i = 0; i < 8; i++) 603 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); 604 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); 605 606 /* Wake Up */ 607 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); 608 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); 609 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); 610 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); 611 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); 612 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); 613 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); 614 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 615 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 616 617 /* DCB */ 618 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 619 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 620 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 621 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); 622 for (i = 0; i < 8; i++) 623 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); 624 for (i = 0; i < 8; i++) 625 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); 626 for (i = 0; i < 8; i++) 627 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); 628 for (i = 0; i < 8; i++) 629 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); 630 for (i = 0; i < 8; i++) 631 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); 632 for (i = 0; i < 8; i++) 633 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); 634 635 /* Statistics */ 636 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); 637 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); 638 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); 639 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); 640 for (i = 0; i < 8; i++) 641 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); 642 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); 643 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); 644 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); 645 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); 646 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); 647 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); 648 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); 649 for (i = 0; i < 8; i++) 650 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); 651 for (i = 0; i < 8; i++) 652 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); 653 for (i = 0; i < 8; i++) 654 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); 655 for (i = 0; i < 8; i++) 656 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); 657 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); 658 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); 659 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); 660 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); 661 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); 662 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); 663 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); 664 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); 665 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); 666 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); 667 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); 668 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); 669 for (i = 0; i < 8; i++) 670 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); 671 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); 672 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); 673 regs_buff[956] = IXGBE_GET_STAT(adapter, roc); 674 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); 675 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); 676 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); 677 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); 678 regs_buff[961] = IXGBE_GET_STAT(adapter, tor); 679 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); 680 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); 681 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); 682 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); 683 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); 684 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); 685 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); 686 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); 687 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); 688 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); 689 regs_buff[973] = IXGBE_GET_STAT(adapter, xec); 690 for (i = 0; i < 16; i++) 691 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); 692 for (i = 0; i < 16; i++) 693 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); 694 for (i = 0; i < 16; i++) 695 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); 696 for (i = 0; i < 16; i++) 697 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); 698 699 /* MAC */ 700 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); 701 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 702 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 703 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); 704 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); 705 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 706 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 707 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); 708 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); 709 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); 710 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); 711 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); 712 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); 713 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); 714 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); 715 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); 716 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); 717 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); 718 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); 719 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); 720 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); 721 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); 722 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); 723 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); 724 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); 725 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); 726 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); 727 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); 728 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 729 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); 730 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); 731 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); 732 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 733 734 /* Diagnostic */ 735 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 736 for (i = 0; i < 8; i++) 737 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 738 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 739 for (i = 0; i < 4; i++) 740 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); 741 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 742 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 743 for (i = 0; i < 8; i++) 744 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 745 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 746 for (i = 0; i < 4; i++) 747 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); 748 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 749 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 750 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 751 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); 752 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); 753 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); 754 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); 755 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); 756 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); 757 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 758 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 759 for (i = 0; i < 8; i++) 760 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 761 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 762 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 763 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 764 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); 765 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); 766 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); 767 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); 768 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); 769 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); 770 771 /* 82599 X540 specific registers */ 772 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); 773 } 774 775 static int ixgbe_get_eeprom_len(struct net_device *netdev) 776 { 777 struct ixgbe_adapter *adapter = netdev_priv(netdev); 778 return adapter->hw.eeprom.word_size * 2; 779 } 780 781 static int ixgbe_get_eeprom(struct net_device *netdev, 782 struct ethtool_eeprom *eeprom, u8 *bytes) 783 { 784 struct ixgbe_adapter *adapter = netdev_priv(netdev); 785 struct ixgbe_hw *hw = &adapter->hw; 786 u16 *eeprom_buff; 787 int first_word, last_word, eeprom_len; 788 int ret_val = 0; 789 u16 i; 790 791 if (eeprom->len == 0) 792 return -EINVAL; 793 794 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 795 796 first_word = eeprom->offset >> 1; 797 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 798 eeprom_len = last_word - first_word + 1; 799 800 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); 801 if (!eeprom_buff) 802 return -ENOMEM; 803 804 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, 805 eeprom_buff); 806 807 /* Device's eeprom is always little-endian, word addressable */ 808 for (i = 0; i < eeprom_len; i++) 809 le16_to_cpus(&eeprom_buff[i]); 810 811 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); 812 kfree(eeprom_buff); 813 814 return ret_val; 815 } 816 817 static int ixgbe_set_eeprom(struct net_device *netdev, 818 struct ethtool_eeprom *eeprom, u8 *bytes) 819 { 820 struct ixgbe_adapter *adapter = netdev_priv(netdev); 821 struct ixgbe_hw *hw = &adapter->hw; 822 u16 *eeprom_buff; 823 void *ptr; 824 int max_len, first_word, last_word, ret_val = 0; 825 u16 i; 826 827 if (eeprom->len == 0) 828 return -EINVAL; 829 830 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 831 return -EINVAL; 832 833 max_len = hw->eeprom.word_size * 2; 834 835 first_word = eeprom->offset >> 1; 836 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 837 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 838 if (!eeprom_buff) 839 return -ENOMEM; 840 841 ptr = eeprom_buff; 842 843 if (eeprom->offset & 1) { 844 /* 845 * need read/modify/write of first changed EEPROM word 846 * only the second byte of the word is being modified 847 */ 848 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); 849 if (ret_val) 850 goto err; 851 852 ptr++; 853 } 854 if ((eeprom->offset + eeprom->len) & 1) { 855 /* 856 * need read/modify/write of last changed EEPROM word 857 * only the first byte of the word is being modified 858 */ 859 ret_val = hw->eeprom.ops.read(hw, last_word, 860 &eeprom_buff[last_word - first_word]); 861 if (ret_val) 862 goto err; 863 } 864 865 /* Device's eeprom is always little-endian, word addressable */ 866 for (i = 0; i < last_word - first_word + 1; i++) 867 le16_to_cpus(&eeprom_buff[i]); 868 869 memcpy(ptr, bytes, eeprom->len); 870 871 for (i = 0; i < last_word - first_word + 1; i++) 872 cpu_to_le16s(&eeprom_buff[i]); 873 874 ret_val = hw->eeprom.ops.write_buffer(hw, first_word, 875 last_word - first_word + 1, 876 eeprom_buff); 877 878 /* Update the checksum */ 879 if (ret_val == 0) 880 hw->eeprom.ops.update_checksum(hw); 881 882 err: 883 kfree(eeprom_buff); 884 return ret_val; 885 } 886 887 static void ixgbe_get_drvinfo(struct net_device *netdev, 888 struct ethtool_drvinfo *drvinfo) 889 { 890 struct ixgbe_adapter *adapter = netdev_priv(netdev); 891 u32 nvm_track_id; 892 893 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 894 strlcpy(drvinfo->version, ixgbe_driver_version, 895 sizeof(drvinfo->version)); 896 897 nvm_track_id = (adapter->eeprom_verh << 16) | 898 adapter->eeprom_verl; 899 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 900 nvm_track_id); 901 902 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 903 sizeof(drvinfo->bus_info)); 904 drvinfo->n_stats = IXGBE_STATS_LEN; 905 drvinfo->testinfo_len = IXGBE_TEST_LEN; 906 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 907 } 908 909 static void ixgbe_get_ringparam(struct net_device *netdev, 910 struct ethtool_ringparam *ring) 911 { 912 struct ixgbe_adapter *adapter = netdev_priv(netdev); 913 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 914 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 915 916 ring->rx_max_pending = IXGBE_MAX_RXD; 917 ring->tx_max_pending = IXGBE_MAX_TXD; 918 ring->rx_pending = rx_ring->count; 919 ring->tx_pending = tx_ring->count; 920 } 921 922 static int ixgbe_set_ringparam(struct net_device *netdev, 923 struct ethtool_ringparam *ring) 924 { 925 struct ixgbe_adapter *adapter = netdev_priv(netdev); 926 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 927 int i, err = 0; 928 u32 new_rx_count, new_tx_count; 929 bool need_update = false; 930 931 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 932 return -EINVAL; 933 934 new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); 935 new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); 936 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); 937 938 new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); 939 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); 940 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 941 942 if ((new_tx_count == adapter->tx_ring[0]->count) && 943 (new_rx_count == adapter->rx_ring[0]->count)) { 944 /* nothing to do */ 945 return 0; 946 } 947 948 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 949 usleep_range(1000, 2000); 950 951 if (!netif_running(adapter->netdev)) { 952 for (i = 0; i < adapter->num_tx_queues; i++) 953 adapter->tx_ring[i]->count = new_tx_count; 954 for (i = 0; i < adapter->num_rx_queues; i++) 955 adapter->rx_ring[i]->count = new_rx_count; 956 adapter->tx_ring_count = new_tx_count; 957 adapter->rx_ring_count = new_rx_count; 958 goto clear_reset; 959 } 960 961 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 962 if (!temp_tx_ring) { 963 err = -ENOMEM; 964 goto clear_reset; 965 } 966 967 if (new_tx_count != adapter->tx_ring_count) { 968 for (i = 0; i < adapter->num_tx_queues; i++) { 969 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 970 sizeof(struct ixgbe_ring)); 971 temp_tx_ring[i].count = new_tx_count; 972 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); 973 if (err) { 974 while (i) { 975 i--; 976 ixgbe_free_tx_resources(&temp_tx_ring[i]); 977 } 978 goto clear_reset; 979 } 980 } 981 need_update = true; 982 } 983 984 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 985 if (!temp_rx_ring) { 986 err = -ENOMEM; 987 goto err_setup; 988 } 989 990 if (new_rx_count != adapter->rx_ring_count) { 991 for (i = 0; i < adapter->num_rx_queues; i++) { 992 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 993 sizeof(struct ixgbe_ring)); 994 temp_rx_ring[i].count = new_rx_count; 995 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); 996 if (err) { 997 while (i) { 998 i--; 999 ixgbe_free_rx_resources(&temp_rx_ring[i]); 1000 } 1001 goto err_setup; 1002 } 1003 } 1004 need_update = true; 1005 } 1006 1007 /* if rings need to be updated, here's the place to do it in one shot */ 1008 if (need_update) { 1009 ixgbe_down(adapter); 1010 1011 /* tx */ 1012 if (new_tx_count != adapter->tx_ring_count) { 1013 for (i = 0; i < adapter->num_tx_queues; i++) { 1014 ixgbe_free_tx_resources(adapter->tx_ring[i]); 1015 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 1016 sizeof(struct ixgbe_ring)); 1017 } 1018 adapter->tx_ring_count = new_tx_count; 1019 } 1020 1021 /* rx */ 1022 if (new_rx_count != adapter->rx_ring_count) { 1023 for (i = 0; i < adapter->num_rx_queues; i++) { 1024 ixgbe_free_rx_resources(adapter->rx_ring[i]); 1025 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 1026 sizeof(struct ixgbe_ring)); 1027 } 1028 adapter->rx_ring_count = new_rx_count; 1029 } 1030 ixgbe_up(adapter); 1031 } 1032 1033 vfree(temp_rx_ring); 1034 err_setup: 1035 vfree(temp_tx_ring); 1036 clear_reset: 1037 clear_bit(__IXGBE_RESETTING, &adapter->state); 1038 return err; 1039 } 1040 1041 static int ixgbe_get_sset_count(struct net_device *netdev, int sset) 1042 { 1043 switch (sset) { 1044 case ETH_SS_TEST: 1045 return IXGBE_TEST_LEN; 1046 case ETH_SS_STATS: 1047 return IXGBE_STATS_LEN; 1048 default: 1049 return -EOPNOTSUPP; 1050 } 1051 } 1052 1053 static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1054 struct ethtool_stats *stats, u64 *data) 1055 { 1056 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1057 struct rtnl_link_stats64 temp; 1058 const struct rtnl_link_stats64 *net_stats; 1059 unsigned int start; 1060 struct ixgbe_ring *ring; 1061 int i, j; 1062 char *p = NULL; 1063 1064 ixgbe_update_stats(adapter); 1065 net_stats = dev_get_stats(netdev, &temp); 1066 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1067 switch (ixgbe_gstrings_stats[i].type) { 1068 case NETDEV_STATS: 1069 p = (char *) net_stats + 1070 ixgbe_gstrings_stats[i].stat_offset; 1071 break; 1072 case IXGBE_STATS: 1073 p = (char *) adapter + 1074 ixgbe_gstrings_stats[i].stat_offset; 1075 break; 1076 } 1077 1078 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1079 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1080 } 1081 for (j = 0; j < adapter->num_tx_queues; j++) { 1082 ring = adapter->tx_ring[j]; 1083 do { 1084 start = u64_stats_fetch_begin_bh(&ring->syncp); 1085 data[i] = ring->stats.packets; 1086 data[i+1] = ring->stats.bytes; 1087 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1088 i += 2; 1089 } 1090 for (j = 0; j < adapter->num_rx_queues; j++) { 1091 ring = adapter->rx_ring[j]; 1092 do { 1093 start = u64_stats_fetch_begin_bh(&ring->syncp); 1094 data[i] = ring->stats.packets; 1095 data[i+1] = ring->stats.bytes; 1096 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1097 i += 2; 1098 } 1099 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1100 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { 1101 data[i++] = adapter->stats.pxontxc[j]; 1102 data[i++] = adapter->stats.pxofftxc[j]; 1103 } 1104 for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { 1105 data[i++] = adapter->stats.pxonrxc[j]; 1106 data[i++] = adapter->stats.pxoffrxc[j]; 1107 } 1108 } 1109 } 1110 1111 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1112 u8 *data) 1113 { 1114 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1115 char *p = (char *)data; 1116 int i; 1117 1118 switch (stringset) { 1119 case ETH_SS_TEST: 1120 memcpy(data, *ixgbe_gstrings_test, 1121 IXGBE_TEST_LEN * ETH_GSTRING_LEN); 1122 break; 1123 case ETH_SS_STATS: 1124 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1125 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 1126 ETH_GSTRING_LEN); 1127 p += ETH_GSTRING_LEN; 1128 } 1129 for (i = 0; i < adapter->num_tx_queues; i++) { 1130 sprintf(p, "tx_queue_%u_packets", i); 1131 p += ETH_GSTRING_LEN; 1132 sprintf(p, "tx_queue_%u_bytes", i); 1133 p += ETH_GSTRING_LEN; 1134 } 1135 for (i = 0; i < adapter->num_rx_queues; i++) { 1136 sprintf(p, "rx_queue_%u_packets", i); 1137 p += ETH_GSTRING_LEN; 1138 sprintf(p, "rx_queue_%u_bytes", i); 1139 p += ETH_GSTRING_LEN; 1140 } 1141 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1142 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { 1143 sprintf(p, "tx_pb_%u_pxon", i); 1144 p += ETH_GSTRING_LEN; 1145 sprintf(p, "tx_pb_%u_pxoff", i); 1146 p += ETH_GSTRING_LEN; 1147 } 1148 for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { 1149 sprintf(p, "rx_pb_%u_pxon", i); 1150 p += ETH_GSTRING_LEN; 1151 sprintf(p, "rx_pb_%u_pxoff", i); 1152 p += ETH_GSTRING_LEN; 1153 } 1154 } 1155 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 1156 break; 1157 } 1158 } 1159 1160 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) 1161 { 1162 struct ixgbe_hw *hw = &adapter->hw; 1163 bool link_up; 1164 u32 link_speed = 0; 1165 *data = 0; 1166 1167 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 1168 if (link_up) 1169 return *data; 1170 else 1171 *data = 1; 1172 return *data; 1173 } 1174 1175 /* ethtool register test data */ 1176 struct ixgbe_reg_test { 1177 u16 reg; 1178 u8 array_len; 1179 u8 test_type; 1180 u32 mask; 1181 u32 write; 1182 }; 1183 1184 /* In the hardware, registers are laid out either singly, in arrays 1185 * spaced 0x40 bytes apart, or in contiguous tables. We assume 1186 * most tests take place on arrays or single registers (handled 1187 * as a single-element array) and special-case the tables. 1188 * Table tests are always pattern tests. 1189 * 1190 * We also make provision for some required setup steps by specifying 1191 * registers to be written without any read-back testing. 1192 */ 1193 1194 #define PATTERN_TEST 1 1195 #define SET_READ_TEST 2 1196 #define WRITE_NO_TEST 3 1197 #define TABLE32_TEST 4 1198 #define TABLE64_TEST_LO 5 1199 #define TABLE64_TEST_HI 6 1200 1201 /* default 82599 register test */ 1202 static const struct ixgbe_reg_test reg_test_82599[] = { 1203 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1204 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1205 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1206 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1207 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 1208 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1209 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1210 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1211 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1212 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1213 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1214 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1215 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1216 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1217 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, 1218 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, 1219 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1220 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, 1221 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1222 { 0, 0, 0, 0 } 1223 }; 1224 1225 /* default 82598 register test */ 1226 static const struct ixgbe_reg_test reg_test_82598[] = { 1227 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1228 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1229 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1230 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1231 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1232 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1233 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1234 /* Enable all four RX queues before testing. */ 1235 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1236 /* RDH is read-only for 82598, only test RDT. */ 1237 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1238 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1239 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1240 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1241 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, 1242 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1243 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1244 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1245 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, 1246 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, 1247 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1248 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, 1249 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1250 { 0, 0, 0, 0 } 1251 }; 1252 1253 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, 1254 u32 mask, u32 write) 1255 { 1256 u32 pat, val, before; 1257 static const u32 test_pattern[] = { 1258 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1259 1260 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { 1261 before = readl(adapter->hw.hw_addr + reg); 1262 writel((test_pattern[pat] & write), 1263 (adapter->hw.hw_addr + reg)); 1264 val = readl(adapter->hw.hw_addr + reg); 1265 if (val != (test_pattern[pat] & write & mask)) { 1266 e_err(drv, "pattern test reg %04X failed: got " 1267 "0x%08X expected 0x%08X\n", 1268 reg, val, (test_pattern[pat] & write & mask)); 1269 *data = reg; 1270 writel(before, adapter->hw.hw_addr + reg); 1271 return 1; 1272 } 1273 writel(before, adapter->hw.hw_addr + reg); 1274 } 1275 return 0; 1276 } 1277 1278 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, 1279 u32 mask, u32 write) 1280 { 1281 u32 val, before; 1282 before = readl(adapter->hw.hw_addr + reg); 1283 writel((write & mask), (adapter->hw.hw_addr + reg)); 1284 val = readl(adapter->hw.hw_addr + reg); 1285 if ((write & mask) != (val & mask)) { 1286 e_err(drv, "set/check reg %04X test failed: got 0x%08X " 1287 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 1288 *data = reg; 1289 writel(before, (adapter->hw.hw_addr + reg)); 1290 return 1; 1291 } 1292 writel(before, (adapter->hw.hw_addr + reg)); 1293 return 0; 1294 } 1295 1296 #define REG_PATTERN_TEST(reg, mask, write) \ 1297 do { \ 1298 if (reg_pattern_test(adapter, data, reg, mask, write)) \ 1299 return 1; \ 1300 } while (0) \ 1301 1302 1303 #define REG_SET_AND_CHECK(reg, mask, write) \ 1304 do { \ 1305 if (reg_set_and_check(adapter, data, reg, mask, write)) \ 1306 return 1; \ 1307 } while (0) \ 1308 1309 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1310 { 1311 const struct ixgbe_reg_test *test; 1312 u32 value, before, after; 1313 u32 i, toggle; 1314 1315 switch (adapter->hw.mac.type) { 1316 case ixgbe_mac_82598EB: 1317 toggle = 0x7FFFF3FF; 1318 test = reg_test_82598; 1319 break; 1320 case ixgbe_mac_82599EB: 1321 case ixgbe_mac_X540: 1322 toggle = 0x7FFFF30F; 1323 test = reg_test_82599; 1324 break; 1325 default: 1326 *data = 1; 1327 return 1; 1328 break; 1329 } 1330 1331 /* 1332 * Because the status register is such a special case, 1333 * we handle it separately from the rest of the register 1334 * tests. Some bits are read-only, some toggle, and some 1335 * are writeable on newer MACs. 1336 */ 1337 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); 1338 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); 1339 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); 1340 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; 1341 if (value != after) { 1342 e_err(drv, "failed STATUS register test got: 0x%08X " 1343 "expected: 0x%08X\n", after, value); 1344 *data = 1; 1345 return 1; 1346 } 1347 /* restore previous status */ 1348 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); 1349 1350 /* 1351 * Perform the remainder of the register test, looping through 1352 * the test table until we either fail or reach the null entry. 1353 */ 1354 while (test->reg) { 1355 for (i = 0; i < test->array_len; i++) { 1356 switch (test->test_type) { 1357 case PATTERN_TEST: 1358 REG_PATTERN_TEST(test->reg + (i * 0x40), 1359 test->mask, 1360 test->write); 1361 break; 1362 case SET_READ_TEST: 1363 REG_SET_AND_CHECK(test->reg + (i * 0x40), 1364 test->mask, 1365 test->write); 1366 break; 1367 case WRITE_NO_TEST: 1368 writel(test->write, 1369 (adapter->hw.hw_addr + test->reg) 1370 + (i * 0x40)); 1371 break; 1372 case TABLE32_TEST: 1373 REG_PATTERN_TEST(test->reg + (i * 4), 1374 test->mask, 1375 test->write); 1376 break; 1377 case TABLE64_TEST_LO: 1378 REG_PATTERN_TEST(test->reg + (i * 8), 1379 test->mask, 1380 test->write); 1381 break; 1382 case TABLE64_TEST_HI: 1383 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1384 test->mask, 1385 test->write); 1386 break; 1387 } 1388 } 1389 test++; 1390 } 1391 1392 *data = 0; 1393 return 0; 1394 } 1395 1396 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) 1397 { 1398 struct ixgbe_hw *hw = &adapter->hw; 1399 if (hw->eeprom.ops.validate_checksum(hw, NULL)) 1400 *data = 1; 1401 else 1402 *data = 0; 1403 return *data; 1404 } 1405 1406 static irqreturn_t ixgbe_test_intr(int irq, void *data) 1407 { 1408 struct net_device *netdev = (struct net_device *) data; 1409 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1410 1411 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); 1412 1413 return IRQ_HANDLED; 1414 } 1415 1416 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) 1417 { 1418 struct net_device *netdev = adapter->netdev; 1419 u32 mask, i = 0, shared_int = true; 1420 u32 irq = adapter->pdev->irq; 1421 1422 *data = 0; 1423 1424 /* Hook up test interrupt handler just for this test */ 1425 if (adapter->msix_entries) { 1426 /* NOTE: we don't test MSI-X interrupts here, yet */ 1427 return 0; 1428 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1429 shared_int = false; 1430 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, 1431 netdev)) { 1432 *data = 1; 1433 return -1; 1434 } 1435 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1436 netdev->name, netdev)) { 1437 shared_int = false; 1438 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1439 netdev->name, netdev)) { 1440 *data = 1; 1441 return -1; 1442 } 1443 e_info(hw, "testing %s interrupt\n", shared_int ? 1444 "shared" : "unshared"); 1445 1446 /* Disable all the interrupts */ 1447 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1448 IXGBE_WRITE_FLUSH(&adapter->hw); 1449 usleep_range(10000, 20000); 1450 1451 /* Test each interrupt */ 1452 for (; i < 10; i++) { 1453 /* Interrupt to test */ 1454 mask = 1 << i; 1455 1456 if (!shared_int) { 1457 /* 1458 * Disable the interrupts to be reported in 1459 * the cause register and then force the same 1460 * interrupt and see if one gets posted. If 1461 * an interrupt was posted to the bus, the 1462 * test failed. 1463 */ 1464 adapter->test_icr = 0; 1465 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1466 ~mask & 0x00007FFF); 1467 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1468 ~mask & 0x00007FFF); 1469 IXGBE_WRITE_FLUSH(&adapter->hw); 1470 usleep_range(10000, 20000); 1471 1472 if (adapter->test_icr & mask) { 1473 *data = 3; 1474 break; 1475 } 1476 } 1477 1478 /* 1479 * Enable the interrupt to be reported in the cause 1480 * register and then force the same interrupt and see 1481 * if one gets posted. If an interrupt was not posted 1482 * to the bus, the test failed. 1483 */ 1484 adapter->test_icr = 0; 1485 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1486 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1487 IXGBE_WRITE_FLUSH(&adapter->hw); 1488 usleep_range(10000, 20000); 1489 1490 if (!(adapter->test_icr &mask)) { 1491 *data = 4; 1492 break; 1493 } 1494 1495 if (!shared_int) { 1496 /* 1497 * Disable the other interrupts to be reported in 1498 * the cause register and then force the other 1499 * interrupts and see if any get posted. If 1500 * an interrupt was posted to the bus, the 1501 * test failed. 1502 */ 1503 adapter->test_icr = 0; 1504 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1505 ~mask & 0x00007FFF); 1506 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1507 ~mask & 0x00007FFF); 1508 IXGBE_WRITE_FLUSH(&adapter->hw); 1509 usleep_range(10000, 20000); 1510 1511 if (adapter->test_icr) { 1512 *data = 5; 1513 break; 1514 } 1515 } 1516 } 1517 1518 /* Disable all the interrupts */ 1519 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1520 IXGBE_WRITE_FLUSH(&adapter->hw); 1521 usleep_range(10000, 20000); 1522 1523 /* Unhook test interrupt handler */ 1524 free_irq(irq, netdev); 1525 1526 return *data; 1527 } 1528 1529 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) 1530 { 1531 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1532 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1533 struct ixgbe_hw *hw = &adapter->hw; 1534 u32 reg_ctl; 1535 1536 /* shut down the DMA engines now so they can be reinitialized later */ 1537 1538 /* first Rx */ 1539 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1540 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1541 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1542 ixgbe_disable_rx_queue(adapter, rx_ring); 1543 1544 /* now Tx */ 1545 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1546 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1547 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1548 1549 switch (hw->mac.type) { 1550 case ixgbe_mac_82599EB: 1551 case ixgbe_mac_X540: 1552 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1553 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1554 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1555 break; 1556 default: 1557 break; 1558 } 1559 1560 ixgbe_reset(adapter); 1561 1562 ixgbe_free_tx_resources(&adapter->test_tx_ring); 1563 ixgbe_free_rx_resources(&adapter->test_rx_ring); 1564 } 1565 1566 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1567 { 1568 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1569 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1570 u32 rctl, reg_data; 1571 int ret_val; 1572 int err; 1573 1574 /* Setup Tx descriptor ring and Tx buffers */ 1575 tx_ring->count = IXGBE_DEFAULT_TXD; 1576 tx_ring->queue_index = 0; 1577 tx_ring->dev = &adapter->pdev->dev; 1578 tx_ring->netdev = adapter->netdev; 1579 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1580 tx_ring->numa_node = adapter->node; 1581 1582 err = ixgbe_setup_tx_resources(tx_ring); 1583 if (err) 1584 return 1; 1585 1586 switch (adapter->hw.mac.type) { 1587 case ixgbe_mac_82599EB: 1588 case ixgbe_mac_X540: 1589 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1590 reg_data |= IXGBE_DMATXCTL_TE; 1591 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1592 break; 1593 default: 1594 break; 1595 } 1596 1597 ixgbe_configure_tx_ring(adapter, tx_ring); 1598 1599 /* Setup Rx Descriptor ring and Rx buffers */ 1600 rx_ring->count = IXGBE_DEFAULT_RXD; 1601 rx_ring->queue_index = 0; 1602 rx_ring->dev = &adapter->pdev->dev; 1603 rx_ring->netdev = adapter->netdev; 1604 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1605 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; 1606 rx_ring->numa_node = adapter->node; 1607 1608 err = ixgbe_setup_rx_resources(rx_ring); 1609 if (err) { 1610 ret_val = 4; 1611 goto err_nomem; 1612 } 1613 1614 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1615 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1616 1617 ixgbe_configure_rx_ring(adapter, rx_ring); 1618 1619 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1620 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1621 1622 return 0; 1623 1624 err_nomem: 1625 ixgbe_free_desc_rings(adapter); 1626 return ret_val; 1627 } 1628 1629 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) 1630 { 1631 struct ixgbe_hw *hw = &adapter->hw; 1632 u32 reg_data; 1633 1634 /* X540 needs to set the MACC.FLU bit to force link up */ 1635 if (adapter->hw.mac.type == ixgbe_mac_X540) { 1636 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); 1637 reg_data |= IXGBE_MACC_FLU; 1638 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); 1639 } 1640 1641 /* right now we only support MAC loopback in the driver */ 1642 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1643 /* Setup MAC loopback */ 1644 reg_data |= IXGBE_HLREG0_LPBK; 1645 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); 1646 1647 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1648 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; 1649 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); 1650 1651 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1652 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1653 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1654 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); 1655 IXGBE_WRITE_FLUSH(hw); 1656 usleep_range(10000, 20000); 1657 1658 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1659 if (hw->mac.type == ixgbe_mac_82598EB) { 1660 u8 atlas; 1661 1662 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); 1663 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 1664 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); 1665 1666 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); 1667 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 1668 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); 1669 1670 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); 1671 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 1672 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); 1673 1674 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); 1675 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 1676 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); 1677 } 1678 1679 return 0; 1680 } 1681 1682 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) 1683 { 1684 u32 reg_data; 1685 1686 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1687 reg_data &= ~IXGBE_HLREG0_LPBK; 1688 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1689 } 1690 1691 static void ixgbe_create_lbtest_frame(struct sk_buff *skb, 1692 unsigned int frame_size) 1693 { 1694 memset(skb->data, 0xFF, frame_size); 1695 frame_size &= ~1; 1696 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1697 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1698 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1699 } 1700 1701 static int ixgbe_check_lbtest_frame(struct sk_buff *skb, 1702 unsigned int frame_size) 1703 { 1704 frame_size &= ~1; 1705 if (*(skb->data + 3) == 0xFF) { 1706 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1707 (*(skb->data + frame_size / 2 + 12) == 0xAF)) { 1708 return 0; 1709 } 1710 } 1711 return 13; 1712 } 1713 1714 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, 1715 struct ixgbe_ring *tx_ring, 1716 unsigned int size) 1717 { 1718 union ixgbe_adv_rx_desc *rx_desc; 1719 struct ixgbe_rx_buffer *rx_buffer_info; 1720 struct ixgbe_tx_buffer *tx_buffer_info; 1721 const int bufsz = rx_ring->rx_buf_len; 1722 u32 staterr; 1723 u16 rx_ntc, tx_ntc, count = 0; 1724 1725 /* initialize next to clean and descriptor values */ 1726 rx_ntc = rx_ring->next_to_clean; 1727 tx_ntc = tx_ring->next_to_clean; 1728 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); 1729 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1730 1731 while (staterr & IXGBE_RXD_STAT_DD) { 1732 /* check Rx buffer */ 1733 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1734 1735 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ 1736 dma_unmap_single(rx_ring->dev, 1737 rx_buffer_info->dma, 1738 bufsz, 1739 DMA_FROM_DEVICE); 1740 rx_buffer_info->dma = 0; 1741 1742 /* verify contents of skb */ 1743 if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size)) 1744 count++; 1745 1746 /* unmap buffer on Tx side */ 1747 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1748 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1749 1750 /* increment Rx/Tx next to clean counters */ 1751 rx_ntc++; 1752 if (rx_ntc == rx_ring->count) 1753 rx_ntc = 0; 1754 tx_ntc++; 1755 if (tx_ntc == tx_ring->count) 1756 tx_ntc = 0; 1757 1758 /* fetch next descriptor */ 1759 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); 1760 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1761 } 1762 1763 /* re-map buffers to ring, store next to clean values */ 1764 ixgbe_alloc_rx_buffers(rx_ring, count); 1765 rx_ring->next_to_clean = rx_ntc; 1766 tx_ring->next_to_clean = tx_ntc; 1767 1768 return count; 1769 } 1770 1771 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1772 { 1773 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1774 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1775 int i, j, lc, good_cnt, ret_val = 0; 1776 unsigned int size = 1024; 1777 netdev_tx_t tx_ret_val; 1778 struct sk_buff *skb; 1779 1780 /* allocate test skb */ 1781 skb = alloc_skb(size, GFP_KERNEL); 1782 if (!skb) 1783 return 11; 1784 1785 /* place data into test skb */ 1786 ixgbe_create_lbtest_frame(skb, size); 1787 skb_put(skb, size); 1788 1789 /* 1790 * Calculate the loop count based on the largest descriptor ring 1791 * The idea is to wrap the largest ring a number of times using 64 1792 * send/receive pairs during each loop 1793 */ 1794 1795 if (rx_ring->count <= tx_ring->count) 1796 lc = ((tx_ring->count / 64) * 2) + 1; 1797 else 1798 lc = ((rx_ring->count / 64) * 2) + 1; 1799 1800 for (j = 0; j <= lc; j++) { 1801 /* reset count of good packets */ 1802 good_cnt = 0; 1803 1804 /* place 64 packets on the transmit queue*/ 1805 for (i = 0; i < 64; i++) { 1806 skb_get(skb); 1807 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1808 adapter, 1809 tx_ring); 1810 if (tx_ret_val == NETDEV_TX_OK) 1811 good_cnt++; 1812 } 1813 1814 if (good_cnt != 64) { 1815 ret_val = 12; 1816 break; 1817 } 1818 1819 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1820 msleep(200); 1821 1822 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); 1823 if (good_cnt != 64) { 1824 ret_val = 13; 1825 break; 1826 } 1827 } 1828 1829 /* free the original skb */ 1830 kfree_skb(skb); 1831 1832 return ret_val; 1833 } 1834 1835 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) 1836 { 1837 *data = ixgbe_setup_desc_rings(adapter); 1838 if (*data) 1839 goto out; 1840 *data = ixgbe_setup_loopback_test(adapter); 1841 if (*data) 1842 goto err_loopback; 1843 *data = ixgbe_run_loopback_test(adapter); 1844 ixgbe_loopback_cleanup(adapter); 1845 1846 err_loopback: 1847 ixgbe_free_desc_rings(adapter); 1848 out: 1849 return *data; 1850 } 1851 1852 static void ixgbe_diag_test(struct net_device *netdev, 1853 struct ethtool_test *eth_test, u64 *data) 1854 { 1855 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1856 bool if_running = netif_running(netdev); 1857 1858 set_bit(__IXGBE_TESTING, &adapter->state); 1859 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1860 /* Offline tests */ 1861 1862 e_info(hw, "offline testing starting\n"); 1863 1864 /* Link test performed before hardware reset so autoneg doesn't 1865 * interfere with test result */ 1866 if (ixgbe_link_test(adapter, &data[4])) 1867 eth_test->flags |= ETH_TEST_FL_FAILED; 1868 1869 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 1870 int i; 1871 for (i = 0; i < adapter->num_vfs; i++) { 1872 if (adapter->vfinfo[i].clear_to_send) { 1873 netdev_warn(netdev, "%s", 1874 "offline diagnostic is not " 1875 "supported when VFs are " 1876 "present\n"); 1877 data[0] = 1; 1878 data[1] = 1; 1879 data[2] = 1; 1880 data[3] = 1; 1881 eth_test->flags |= ETH_TEST_FL_FAILED; 1882 clear_bit(__IXGBE_TESTING, 1883 &adapter->state); 1884 goto skip_ol_tests; 1885 } 1886 } 1887 } 1888 1889 if (if_running) 1890 /* indicate we're in test mode */ 1891 dev_close(netdev); 1892 else 1893 ixgbe_reset(adapter); 1894 1895 e_info(hw, "register testing starting\n"); 1896 if (ixgbe_reg_test(adapter, &data[0])) 1897 eth_test->flags |= ETH_TEST_FL_FAILED; 1898 1899 ixgbe_reset(adapter); 1900 e_info(hw, "eeprom testing starting\n"); 1901 if (ixgbe_eeprom_test(adapter, &data[1])) 1902 eth_test->flags |= ETH_TEST_FL_FAILED; 1903 1904 ixgbe_reset(adapter); 1905 e_info(hw, "interrupt testing starting\n"); 1906 if (ixgbe_intr_test(adapter, &data[2])) 1907 eth_test->flags |= ETH_TEST_FL_FAILED; 1908 1909 /* If SRIOV or VMDq is enabled then skip MAC 1910 * loopback diagnostic. */ 1911 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 1912 IXGBE_FLAG_VMDQ_ENABLED)) { 1913 e_info(hw, "Skip MAC loopback diagnostic in VT " 1914 "mode\n"); 1915 data[3] = 0; 1916 goto skip_loopback; 1917 } 1918 1919 ixgbe_reset(adapter); 1920 e_info(hw, "loopback testing starting\n"); 1921 if (ixgbe_loopback_test(adapter, &data[3])) 1922 eth_test->flags |= ETH_TEST_FL_FAILED; 1923 1924 skip_loopback: 1925 ixgbe_reset(adapter); 1926 1927 clear_bit(__IXGBE_TESTING, &adapter->state); 1928 if (if_running) 1929 dev_open(netdev); 1930 } else { 1931 e_info(hw, "online testing starting\n"); 1932 /* Online tests */ 1933 if (ixgbe_link_test(adapter, &data[4])) 1934 eth_test->flags |= ETH_TEST_FL_FAILED; 1935 1936 /* Online tests aren't run; pass by default */ 1937 data[0] = 0; 1938 data[1] = 0; 1939 data[2] = 0; 1940 data[3] = 0; 1941 1942 clear_bit(__IXGBE_TESTING, &adapter->state); 1943 } 1944 skip_ol_tests: 1945 msleep_interruptible(4 * 1000); 1946 } 1947 1948 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 1949 struct ethtool_wolinfo *wol) 1950 { 1951 struct ixgbe_hw *hw = &adapter->hw; 1952 int retval = 1; 1953 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; 1954 1955 /* WOL not supported except for the following */ 1956 switch(hw->device_id) { 1957 case IXGBE_DEV_ID_82599_SFP: 1958 /* Only these subdevices could supports WOL */ 1959 switch (hw->subsystem_device_id) { 1960 case IXGBE_SUBDEV_ID_82599_560FLR: 1961 /* only support first port */ 1962 if (hw->bus.func != 0) { 1963 wol->supported = 0; 1964 break; 1965 } 1966 case IXGBE_SUBDEV_ID_82599_SFP: 1967 retval = 0; 1968 break; 1969 default: 1970 wol->supported = 0; 1971 break; 1972 } 1973 break; 1974 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 1975 /* All except this subdevice support WOL */ 1976 if (hw->subsystem_device_id == 1977 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { 1978 wol->supported = 0; 1979 break; 1980 } 1981 retval = 0; 1982 break; 1983 case IXGBE_DEV_ID_82599_KX4: 1984 retval = 0; 1985 break; 1986 case IXGBE_DEV_ID_X540T: 1987 /* check eeprom to see if enabled wol */ 1988 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 1989 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && 1990 (hw->bus.func == 0))) { 1991 retval = 0; 1992 break; 1993 } 1994 1995 /* All others not supported */ 1996 wol->supported = 0; 1997 break; 1998 default: 1999 wol->supported = 0; 2000 } 2001 2002 return retval; 2003 } 2004 2005 static void ixgbe_get_wol(struct net_device *netdev, 2006 struct ethtool_wolinfo *wol) 2007 { 2008 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2009 2010 wol->supported = WAKE_UCAST | WAKE_MCAST | 2011 WAKE_BCAST | WAKE_MAGIC; 2012 wol->wolopts = 0; 2013 2014 if (ixgbe_wol_exclusion(adapter, wol) || 2015 !device_can_wakeup(&adapter->pdev->dev)) 2016 return; 2017 2018 if (adapter->wol & IXGBE_WUFC_EX) 2019 wol->wolopts |= WAKE_UCAST; 2020 if (adapter->wol & IXGBE_WUFC_MC) 2021 wol->wolopts |= WAKE_MCAST; 2022 if (adapter->wol & IXGBE_WUFC_BC) 2023 wol->wolopts |= WAKE_BCAST; 2024 if (adapter->wol & IXGBE_WUFC_MAG) 2025 wol->wolopts |= WAKE_MAGIC; 2026 } 2027 2028 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2029 { 2030 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2031 2032 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 2033 return -EOPNOTSUPP; 2034 2035 if (ixgbe_wol_exclusion(adapter, wol)) 2036 return wol->wolopts ? -EOPNOTSUPP : 0; 2037 2038 adapter->wol = 0; 2039 2040 if (wol->wolopts & WAKE_UCAST) 2041 adapter->wol |= IXGBE_WUFC_EX; 2042 if (wol->wolopts & WAKE_MCAST) 2043 adapter->wol |= IXGBE_WUFC_MC; 2044 if (wol->wolopts & WAKE_BCAST) 2045 adapter->wol |= IXGBE_WUFC_BC; 2046 if (wol->wolopts & WAKE_MAGIC) 2047 adapter->wol |= IXGBE_WUFC_MAG; 2048 2049 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 2050 2051 return 0; 2052 } 2053 2054 static int ixgbe_nway_reset(struct net_device *netdev) 2055 { 2056 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2057 2058 if (netif_running(netdev)) 2059 ixgbe_reinit_locked(adapter); 2060 2061 return 0; 2062 } 2063 2064 static int ixgbe_set_phys_id(struct net_device *netdev, 2065 enum ethtool_phys_id_state state) 2066 { 2067 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2068 struct ixgbe_hw *hw = &adapter->hw; 2069 2070 switch (state) { 2071 case ETHTOOL_ID_ACTIVE: 2072 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2073 return 2; 2074 2075 case ETHTOOL_ID_ON: 2076 hw->mac.ops.led_on(hw, IXGBE_LED_ON); 2077 break; 2078 2079 case ETHTOOL_ID_OFF: 2080 hw->mac.ops.led_off(hw, IXGBE_LED_ON); 2081 break; 2082 2083 case ETHTOOL_ID_INACTIVE: 2084 /* Restore LED settings */ 2085 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); 2086 break; 2087 } 2088 2089 return 0; 2090 } 2091 2092 static int ixgbe_get_coalesce(struct net_device *netdev, 2093 struct ethtool_coalesce *ec) 2094 { 2095 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2096 2097 ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; 2098 2099 /* only valid if in constant ITR mode */ 2100 if (adapter->rx_itr_setting <= 1) 2101 ec->rx_coalesce_usecs = adapter->rx_itr_setting; 2102 else 2103 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 2104 2105 /* if in mixed tx/rx queues per vector mode, report only rx settings */ 2106 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2107 return 0; 2108 2109 /* only valid if in constant ITR mode */ 2110 if (adapter->tx_itr_setting <= 1) 2111 ec->tx_coalesce_usecs = adapter->tx_itr_setting; 2112 else 2113 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; 2114 2115 return 0; 2116 } 2117 2118 /* 2119 * this function must be called before setting the new value of 2120 * rx_itr_setting 2121 */ 2122 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, 2123 struct ethtool_coalesce *ec) 2124 { 2125 struct net_device *netdev = adapter->netdev; 2126 2127 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 2128 return false; 2129 2130 /* if interrupt rate is too high then disable RSC */ 2131 if (ec->rx_coalesce_usecs != 1 && 2132 ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) { 2133 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2134 e_info(probe, "rx-usecs set too low, disabling RSC\n"); 2135 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2136 return true; 2137 } 2138 } else { 2139 /* check the feature flag value and enable RSC if necessary */ 2140 if ((netdev->features & NETIF_F_LRO) && 2141 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2142 e_info(probe, "rx-usecs set to %d, re-enabling RSC\n", 2143 ec->rx_coalesce_usecs); 2144 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2145 return true; 2146 } 2147 } 2148 return false; 2149 } 2150 2151 static int ixgbe_set_coalesce(struct net_device *netdev, 2152 struct ethtool_coalesce *ec) 2153 { 2154 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2155 struct ixgbe_q_vector *q_vector; 2156 int i; 2157 int num_vectors; 2158 u16 tx_itr_param, rx_itr_param; 2159 bool need_reset = false; 2160 2161 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2162 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count 2163 && ec->tx_coalesce_usecs) 2164 return -EINVAL; 2165 2166 if (ec->tx_max_coalesced_frames_irq) 2167 adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; 2168 2169 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || 2170 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) 2171 return -EINVAL; 2172 2173 /* check the old value and enable RSC if necessary */ 2174 need_reset = ixgbe_update_rsc(adapter, ec); 2175 2176 if (ec->rx_coalesce_usecs > 1) 2177 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; 2178 else 2179 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2180 2181 if (adapter->rx_itr_setting == 1) 2182 rx_itr_param = IXGBE_20K_ITR; 2183 else 2184 rx_itr_param = adapter->rx_itr_setting; 2185 2186 if (ec->tx_coalesce_usecs > 1) 2187 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; 2188 else 2189 adapter->tx_itr_setting = ec->tx_coalesce_usecs; 2190 2191 if (adapter->tx_itr_setting == 1) 2192 tx_itr_param = IXGBE_10K_ITR; 2193 else 2194 tx_itr_param = adapter->tx_itr_setting; 2195 2196 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2197 num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2198 else 2199 num_vectors = 1; 2200 2201 for (i = 0; i < num_vectors; i++) { 2202 q_vector = adapter->q_vector[i]; 2203 q_vector->tx.work_limit = adapter->tx_work_limit; 2204 if (q_vector->tx.count && !q_vector->rx.count) 2205 /* tx only */ 2206 q_vector->itr = tx_itr_param; 2207 else 2208 /* rx only or mixed */ 2209 q_vector->itr = rx_itr_param; 2210 ixgbe_write_eitr(q_vector); 2211 } 2212 2213 /* 2214 * do reset here at the end to make sure EITR==0 case is handled 2215 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings 2216 * also locks in RSC enable/disable which requires reset 2217 */ 2218 if (need_reset) 2219 ixgbe_do_reset(netdev); 2220 2221 return 0; 2222 } 2223 2224 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2225 struct ethtool_rxnfc *cmd) 2226 { 2227 union ixgbe_atr_input *mask = &adapter->fdir_mask; 2228 struct ethtool_rx_flow_spec *fsp = 2229 (struct ethtool_rx_flow_spec *)&cmd->fs; 2230 struct hlist_node *node, *node2; 2231 struct ixgbe_fdir_filter *rule = NULL; 2232 2233 /* report total rule count */ 2234 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2235 2236 hlist_for_each_entry_safe(rule, node, node2, 2237 &adapter->fdir_filter_list, fdir_node) { 2238 if (fsp->location <= rule->sw_idx) 2239 break; 2240 } 2241 2242 if (!rule || fsp->location != rule->sw_idx) 2243 return -EINVAL; 2244 2245 /* fill out the flow spec entry */ 2246 2247 /* set flow type field */ 2248 switch (rule->filter.formatted.flow_type) { 2249 case IXGBE_ATR_FLOW_TYPE_TCPV4: 2250 fsp->flow_type = TCP_V4_FLOW; 2251 break; 2252 case IXGBE_ATR_FLOW_TYPE_UDPV4: 2253 fsp->flow_type = UDP_V4_FLOW; 2254 break; 2255 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 2256 fsp->flow_type = SCTP_V4_FLOW; 2257 break; 2258 case IXGBE_ATR_FLOW_TYPE_IPV4: 2259 fsp->flow_type = IP_USER_FLOW; 2260 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 2261 fsp->h_u.usr_ip4_spec.proto = 0; 2262 fsp->m_u.usr_ip4_spec.proto = 0; 2263 break; 2264 default: 2265 return -EINVAL; 2266 } 2267 2268 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; 2269 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; 2270 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; 2271 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; 2272 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; 2273 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; 2274 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; 2275 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; 2276 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; 2277 fsp->m_ext.vlan_tci = mask->formatted.vlan_id; 2278 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; 2279 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; 2280 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); 2281 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); 2282 fsp->flow_type |= FLOW_EXT; 2283 2284 /* record action */ 2285 if (rule->action == IXGBE_FDIR_DROP_QUEUE) 2286 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2287 else 2288 fsp->ring_cookie = rule->action; 2289 2290 return 0; 2291 } 2292 2293 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, 2294 struct ethtool_rxnfc *cmd, 2295 u32 *rule_locs) 2296 { 2297 struct hlist_node *node, *node2; 2298 struct ixgbe_fdir_filter *rule; 2299 int cnt = 0; 2300 2301 /* report total rule count */ 2302 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2303 2304 hlist_for_each_entry_safe(rule, node, node2, 2305 &adapter->fdir_filter_list, fdir_node) { 2306 if (cnt == cmd->rule_cnt) 2307 return -EMSGSIZE; 2308 rule_locs[cnt] = rule->sw_idx; 2309 cnt++; 2310 } 2311 2312 cmd->rule_cnt = cnt; 2313 2314 return 0; 2315 } 2316 2317 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2318 u32 *rule_locs) 2319 { 2320 struct ixgbe_adapter *adapter = netdev_priv(dev); 2321 int ret = -EOPNOTSUPP; 2322 2323 switch (cmd->cmd) { 2324 case ETHTOOL_GRXRINGS: 2325 cmd->data = adapter->num_rx_queues; 2326 ret = 0; 2327 break; 2328 case ETHTOOL_GRXCLSRLCNT: 2329 cmd->rule_cnt = adapter->fdir_filter_count; 2330 ret = 0; 2331 break; 2332 case ETHTOOL_GRXCLSRULE: 2333 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); 2334 break; 2335 case ETHTOOL_GRXCLSRLALL: 2336 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); 2337 break; 2338 default: 2339 break; 2340 } 2341 2342 return ret; 2343 } 2344 2345 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2346 struct ixgbe_fdir_filter *input, 2347 u16 sw_idx) 2348 { 2349 struct ixgbe_hw *hw = &adapter->hw; 2350 struct hlist_node *node, *node2, *parent; 2351 struct ixgbe_fdir_filter *rule; 2352 int err = -EINVAL; 2353 2354 parent = NULL; 2355 rule = NULL; 2356 2357 hlist_for_each_entry_safe(rule, node, node2, 2358 &adapter->fdir_filter_list, fdir_node) { 2359 /* hash found, or no matching entry */ 2360 if (rule->sw_idx >= sw_idx) 2361 break; 2362 parent = node; 2363 } 2364 2365 /* if there is an old rule occupying our place remove it */ 2366 if (rule && (rule->sw_idx == sw_idx)) { 2367 if (!input || (rule->filter.formatted.bkt_hash != 2368 input->filter.formatted.bkt_hash)) { 2369 err = ixgbe_fdir_erase_perfect_filter_82599(hw, 2370 &rule->filter, 2371 sw_idx); 2372 } 2373 2374 hlist_del(&rule->fdir_node); 2375 kfree(rule); 2376 adapter->fdir_filter_count--; 2377 } 2378 2379 /* 2380 * If no input this was a delete, err should be 0 if a rule was 2381 * successfully found and removed from the list else -EINVAL 2382 */ 2383 if (!input) 2384 return err; 2385 2386 /* initialize node and set software index */ 2387 INIT_HLIST_NODE(&input->fdir_node); 2388 2389 /* add filter to the list */ 2390 if (parent) 2391 hlist_add_after(parent, &input->fdir_node); 2392 else 2393 hlist_add_head(&input->fdir_node, 2394 &adapter->fdir_filter_list); 2395 2396 /* update counts */ 2397 adapter->fdir_filter_count++; 2398 2399 return 0; 2400 } 2401 2402 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, 2403 u8 *flow_type) 2404 { 2405 switch (fsp->flow_type & ~FLOW_EXT) { 2406 case TCP_V4_FLOW: 2407 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2408 break; 2409 case UDP_V4_FLOW: 2410 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2411 break; 2412 case SCTP_V4_FLOW: 2413 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2414 break; 2415 case IP_USER_FLOW: 2416 switch (fsp->h_u.usr_ip4_spec.proto) { 2417 case IPPROTO_TCP: 2418 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2419 break; 2420 case IPPROTO_UDP: 2421 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2422 break; 2423 case IPPROTO_SCTP: 2424 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2425 break; 2426 case 0: 2427 if (!fsp->m_u.usr_ip4_spec.proto) { 2428 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; 2429 break; 2430 } 2431 default: 2432 return 0; 2433 } 2434 break; 2435 default: 2436 return 0; 2437 } 2438 2439 return 1; 2440 } 2441 2442 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2443 struct ethtool_rxnfc *cmd) 2444 { 2445 struct ethtool_rx_flow_spec *fsp = 2446 (struct ethtool_rx_flow_spec *)&cmd->fs; 2447 struct ixgbe_hw *hw = &adapter->hw; 2448 struct ixgbe_fdir_filter *input; 2449 union ixgbe_atr_input mask; 2450 int err; 2451 2452 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 2453 return -EOPNOTSUPP; 2454 2455 /* 2456 * Don't allow programming if the action is a queue greater than 2457 * the number of online Rx queues. 2458 */ 2459 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && 2460 (fsp->ring_cookie >= adapter->num_rx_queues)) 2461 return -EINVAL; 2462 2463 /* Don't allow indexes to exist outside of available space */ 2464 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { 2465 e_err(drv, "Location out of range\n"); 2466 return -EINVAL; 2467 } 2468 2469 input = kzalloc(sizeof(*input), GFP_ATOMIC); 2470 if (!input) 2471 return -ENOMEM; 2472 2473 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 2474 2475 /* set SW index */ 2476 input->sw_idx = fsp->location; 2477 2478 /* record flow type */ 2479 if (!ixgbe_flowspec_to_flow_type(fsp, 2480 &input->filter.formatted.flow_type)) { 2481 e_err(drv, "Unrecognized flow type\n"); 2482 goto err_out; 2483 } 2484 2485 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 2486 IXGBE_ATR_L4TYPE_MASK; 2487 2488 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 2489 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 2490 2491 /* Copy input into formatted structures */ 2492 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2493 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; 2494 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2495 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; 2496 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; 2497 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; 2498 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; 2499 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; 2500 2501 if (fsp->flow_type & FLOW_EXT) { 2502 input->filter.formatted.vm_pool = 2503 (unsigned char)ntohl(fsp->h_ext.data[1]); 2504 mask.formatted.vm_pool = 2505 (unsigned char)ntohl(fsp->m_ext.data[1]); 2506 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; 2507 mask.formatted.vlan_id = fsp->m_ext.vlan_tci; 2508 input->filter.formatted.flex_bytes = 2509 fsp->h_ext.vlan_etype; 2510 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; 2511 } 2512 2513 /* determine if we need to drop or route the packet */ 2514 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) 2515 input->action = IXGBE_FDIR_DROP_QUEUE; 2516 else 2517 input->action = fsp->ring_cookie; 2518 2519 spin_lock(&adapter->fdir_perfect_lock); 2520 2521 if (hlist_empty(&adapter->fdir_filter_list)) { 2522 /* save mask and program input mask into HW */ 2523 memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); 2524 err = ixgbe_fdir_set_input_mask_82599(hw, &mask); 2525 if (err) { 2526 e_err(drv, "Error writing mask\n"); 2527 goto err_out_w_lock; 2528 } 2529 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { 2530 e_err(drv, "Only one mask supported per port\n"); 2531 goto err_out_w_lock; 2532 } 2533 2534 /* apply mask and compute/store hash */ 2535 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); 2536 2537 /* program filters to filter memory */ 2538 err = ixgbe_fdir_write_perfect_filter_82599(hw, 2539 &input->filter, input->sw_idx, 2540 (input->action == IXGBE_FDIR_DROP_QUEUE) ? 2541 IXGBE_FDIR_DROP_QUEUE : 2542 adapter->rx_ring[input->action]->reg_idx); 2543 if (err) 2544 goto err_out_w_lock; 2545 2546 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 2547 2548 spin_unlock(&adapter->fdir_perfect_lock); 2549 2550 return err; 2551 err_out_w_lock: 2552 spin_unlock(&adapter->fdir_perfect_lock); 2553 err_out: 2554 kfree(input); 2555 return -EINVAL; 2556 } 2557 2558 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2559 struct ethtool_rxnfc *cmd) 2560 { 2561 struct ethtool_rx_flow_spec *fsp = 2562 (struct ethtool_rx_flow_spec *)&cmd->fs; 2563 int err; 2564 2565 spin_lock(&adapter->fdir_perfect_lock); 2566 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); 2567 spin_unlock(&adapter->fdir_perfect_lock); 2568 2569 return err; 2570 } 2571 2572 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 2573 { 2574 struct ixgbe_adapter *adapter = netdev_priv(dev); 2575 int ret = -EOPNOTSUPP; 2576 2577 switch (cmd->cmd) { 2578 case ETHTOOL_SRXCLSRLINS: 2579 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); 2580 break; 2581 case ETHTOOL_SRXCLSRLDEL: 2582 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); 2583 break; 2584 default: 2585 break; 2586 } 2587 2588 return ret; 2589 } 2590 2591 static const struct ethtool_ops ixgbe_ethtool_ops = { 2592 .get_settings = ixgbe_get_settings, 2593 .set_settings = ixgbe_set_settings, 2594 .get_drvinfo = ixgbe_get_drvinfo, 2595 .get_regs_len = ixgbe_get_regs_len, 2596 .get_regs = ixgbe_get_regs, 2597 .get_wol = ixgbe_get_wol, 2598 .set_wol = ixgbe_set_wol, 2599 .nway_reset = ixgbe_nway_reset, 2600 .get_link = ethtool_op_get_link, 2601 .get_eeprom_len = ixgbe_get_eeprom_len, 2602 .get_eeprom = ixgbe_get_eeprom, 2603 .set_eeprom = ixgbe_set_eeprom, 2604 .get_ringparam = ixgbe_get_ringparam, 2605 .set_ringparam = ixgbe_set_ringparam, 2606 .get_pauseparam = ixgbe_get_pauseparam, 2607 .set_pauseparam = ixgbe_set_pauseparam, 2608 .get_msglevel = ixgbe_get_msglevel, 2609 .set_msglevel = ixgbe_set_msglevel, 2610 .self_test = ixgbe_diag_test, 2611 .get_strings = ixgbe_get_strings, 2612 .set_phys_id = ixgbe_set_phys_id, 2613 .get_sset_count = ixgbe_get_sset_count, 2614 .get_ethtool_stats = ixgbe_get_ethtool_stats, 2615 .get_coalesce = ixgbe_get_coalesce, 2616 .set_coalesce = ixgbe_set_coalesce, 2617 .get_rxnfc = ixgbe_get_rxnfc, 2618 .set_rxnfc = ixgbe_set_rxnfc, 2619 }; 2620 2621 void ixgbe_set_ethtool_ops(struct net_device *netdev) 2622 { 2623 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); 2624 } 2625