1 /******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* ethtool support for ixgbevf */ 29 30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 32 #include <linux/types.h> 33 #include <linux/module.h> 34 #include <linux/slab.h> 35 #include <linux/pci.h> 36 #include <linux/netdevice.h> 37 #include <linux/ethtool.h> 38 #include <linux/vmalloc.h> 39 #include <linux/if_vlan.h> 40 #include <linux/uaccess.h> 41 42 #include "ixgbevf.h" 43 44 #define IXGBE_ALL_RAR_ENTRIES 16 45 46 #ifdef ETHTOOL_GSTATS 47 struct ixgbe_stats { 48 char stat_string[ETH_GSTRING_LEN]; 49 int sizeof_stat; 50 int stat_offset; 51 int base_stat_offset; 52 int saved_reset_offset; 53 }; 54 55 #define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \ 56 offsetof(struct ixgbevf_adapter, m), \ 57 offsetof(struct ixgbevf_adapter, b), \ 58 offsetof(struct ixgbevf_adapter, r) 59 60 static const struct ixgbe_stats ixgbe_gstrings_stats[] = { 61 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, 62 stats.saved_reset_vfgprc)}, 63 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, 64 stats.saved_reset_vfgptc)}, 65 {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, 66 stats.saved_reset_vfgorc)}, 67 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, 68 stats.saved_reset_vfgotc)}, 69 {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)}, 70 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, 71 stats.saved_reset_vfmprc)}, 72 {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base, 73 zero_base)}, 74 {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base, 75 zero_base)}, 76 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, 77 zero_base)}, 78 {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)}, 79 }; 80 81 #define IXGBE_QUEUE_STATS_LEN 0 82 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 83 84 #define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 85 #endif /* ETHTOOL_GSTATS */ 86 #ifdef ETHTOOL_TEST 87 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 88 "Register test (offline)", 89 "Link test (on/offline)" 90 }; 91 #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) 92 #endif /* ETHTOOL_TEST */ 93 94 static int ixgbevf_get_settings(struct net_device *netdev, 95 struct ethtool_cmd *ecmd) 96 { 97 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 98 struct ixgbe_hw *hw = &adapter->hw; 99 u32 link_speed = 0; 100 bool link_up; 101 102 ecmd->supported = SUPPORTED_10000baseT_Full; 103 ecmd->autoneg = AUTONEG_DISABLE; 104 ecmd->transceiver = XCVR_DUMMY1; 105 ecmd->port = -1; 106 107 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 108 109 if (link_up) { 110 __u32 speed = SPEED_10000; 111 switch (link_speed) { 112 case IXGBE_LINK_SPEED_10GB_FULL: 113 speed = SPEED_10000; 114 break; 115 case IXGBE_LINK_SPEED_1GB_FULL: 116 speed = SPEED_1000; 117 break; 118 case IXGBE_LINK_SPEED_100_FULL: 119 speed = SPEED_100; 120 break; 121 } 122 123 ethtool_cmd_speed_set(ecmd, speed); 124 ecmd->duplex = DUPLEX_FULL; 125 } else { 126 ethtool_cmd_speed_set(ecmd, -1); 127 ecmd->duplex = -1; 128 } 129 130 return 0; 131 } 132 133 static u32 ixgbevf_get_msglevel(struct net_device *netdev) 134 { 135 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 136 return adapter->msg_enable; 137 } 138 139 static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) 140 { 141 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 142 adapter->msg_enable = data; 143 } 144 145 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) 146 147 static char *ixgbevf_reg_names[] = { 148 "IXGBE_VFCTRL", 149 "IXGBE_VFSTATUS", 150 "IXGBE_VFLINKS", 151 "IXGBE_VFRXMEMWRAP", 152 "IXGBE_VFFRTIMER", 153 "IXGBE_VTEICR", 154 "IXGBE_VTEICS", 155 "IXGBE_VTEIMS", 156 "IXGBE_VTEIMC", 157 "IXGBE_VTEIAC", 158 "IXGBE_VTEIAM", 159 "IXGBE_VTEITR", 160 "IXGBE_VTIVAR", 161 "IXGBE_VTIVAR_MISC", 162 "IXGBE_VFRDBAL0", 163 "IXGBE_VFRDBAL1", 164 "IXGBE_VFRDBAH0", 165 "IXGBE_VFRDBAH1", 166 "IXGBE_VFRDLEN0", 167 "IXGBE_VFRDLEN1", 168 "IXGBE_VFRDH0", 169 "IXGBE_VFRDH1", 170 "IXGBE_VFRDT0", 171 "IXGBE_VFRDT1", 172 "IXGBE_VFRXDCTL0", 173 "IXGBE_VFRXDCTL1", 174 "IXGBE_VFSRRCTL0", 175 "IXGBE_VFSRRCTL1", 176 "IXGBE_VFPSRTYPE", 177 "IXGBE_VFTDBAL0", 178 "IXGBE_VFTDBAL1", 179 "IXGBE_VFTDBAH0", 180 "IXGBE_VFTDBAH1", 181 "IXGBE_VFTDLEN0", 182 "IXGBE_VFTDLEN1", 183 "IXGBE_VFTDH0", 184 "IXGBE_VFTDH1", 185 "IXGBE_VFTDT0", 186 "IXGBE_VFTDT1", 187 "IXGBE_VFTXDCTL0", 188 "IXGBE_VFTXDCTL1", 189 "IXGBE_VFTDWBAL0", 190 "IXGBE_VFTDWBAL1", 191 "IXGBE_VFTDWBAH0", 192 "IXGBE_VFTDWBAH1" 193 }; 194 195 196 static int ixgbevf_get_regs_len(struct net_device *netdev) 197 { 198 return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32); 199 } 200 201 static void ixgbevf_get_regs(struct net_device *netdev, 202 struct ethtool_regs *regs, 203 void *p) 204 { 205 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 206 struct ixgbe_hw *hw = &adapter->hw; 207 u32 *regs_buff = p; 208 u32 regs_len = ixgbevf_get_regs_len(netdev); 209 u8 i; 210 211 memset(p, 0, regs_len); 212 213 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; 214 215 /* General Registers */ 216 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); 217 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); 218 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 219 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); 220 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); 221 222 /* Interrupt */ 223 /* don't read EICR because it can clear interrupt causes, instead 224 * read EICS which is a shadow but doesn't clear EICR */ 225 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); 226 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); 227 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); 228 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); 229 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); 230 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); 231 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); 232 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); 233 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 234 235 /* Receive DMA */ 236 for (i = 0; i < 2; i++) 237 regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); 238 for (i = 0; i < 2; i++) 239 regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); 240 for (i = 0; i < 2; i++) 241 regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); 242 for (i = 0; i < 2; i++) 243 regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); 244 for (i = 0; i < 2; i++) 245 regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); 246 for (i = 0; i < 2; i++) 247 regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); 248 for (i = 0; i < 2; i++) 249 regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); 250 251 /* Receive */ 252 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); 253 254 /* Transmit */ 255 for (i = 0; i < 2; i++) 256 regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); 257 for (i = 0; i < 2; i++) 258 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); 259 for (i = 0; i < 2; i++) 260 regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); 261 for (i = 0; i < 2; i++) 262 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); 263 for (i = 0; i < 2; i++) 264 regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); 265 for (i = 0; i < 2; i++) 266 regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); 267 for (i = 0; i < 2; i++) 268 regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); 269 for (i = 0; i < 2; i++) 270 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); 271 272 for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++) 273 hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]); 274 } 275 276 static void ixgbevf_get_drvinfo(struct net_device *netdev, 277 struct ethtool_drvinfo *drvinfo) 278 { 279 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 280 281 strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); 282 strlcpy(drvinfo->version, ixgbevf_driver_version, 283 sizeof(drvinfo->version)); 284 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 285 sizeof(drvinfo->bus_info)); 286 } 287 288 static void ixgbevf_get_ringparam(struct net_device *netdev, 289 struct ethtool_ringparam *ring) 290 { 291 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 292 struct ixgbevf_ring *tx_ring = adapter->tx_ring; 293 struct ixgbevf_ring *rx_ring = adapter->rx_ring; 294 295 ring->rx_max_pending = IXGBEVF_MAX_RXD; 296 ring->tx_max_pending = IXGBEVF_MAX_TXD; 297 ring->rx_pending = rx_ring->count; 298 ring->tx_pending = tx_ring->count; 299 } 300 301 static int ixgbevf_set_ringparam(struct net_device *netdev, 302 struct ethtool_ringparam *ring) 303 { 304 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 305 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; 306 int i, err = 0; 307 u32 new_rx_count, new_tx_count; 308 309 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 310 return -EINVAL; 311 312 new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD); 313 new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD); 314 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); 315 316 new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD); 317 new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD); 318 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 319 320 if ((new_tx_count == adapter->tx_ring->count) && 321 (new_rx_count == adapter->rx_ring->count)) { 322 /* nothing to do */ 323 return 0; 324 } 325 326 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 327 msleep(1); 328 329 /* 330 * If the adapter isn't up and running then just set the 331 * new parameters and scurry for the exits. 332 */ 333 if (!netif_running(adapter->netdev)) { 334 for (i = 0; i < adapter->num_tx_queues; i++) 335 adapter->tx_ring[i].count = new_tx_count; 336 for (i = 0; i < adapter->num_rx_queues; i++) 337 adapter->rx_ring[i].count = new_rx_count; 338 adapter->tx_ring_count = new_tx_count; 339 adapter->rx_ring_count = new_rx_count; 340 goto clear_reset; 341 } 342 343 tx_ring = kcalloc(adapter->num_tx_queues, 344 sizeof(struct ixgbevf_ring), GFP_KERNEL); 345 if (!tx_ring) { 346 err = -ENOMEM; 347 goto clear_reset; 348 } 349 350 rx_ring = kcalloc(adapter->num_rx_queues, 351 sizeof(struct ixgbevf_ring), GFP_KERNEL); 352 if (!rx_ring) { 353 err = -ENOMEM; 354 goto err_rx_setup; 355 } 356 357 ixgbevf_down(adapter); 358 359 memcpy(tx_ring, adapter->tx_ring, 360 adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); 361 for (i = 0; i < adapter->num_tx_queues; i++) { 362 tx_ring[i].count = new_tx_count; 363 err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); 364 if (err) { 365 while (i) { 366 i--; 367 ixgbevf_free_tx_resources(adapter, 368 &tx_ring[i]); 369 } 370 goto err_tx_ring_setup; 371 } 372 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; 373 } 374 375 memcpy(rx_ring, adapter->rx_ring, 376 adapter->num_rx_queues * sizeof(struct ixgbevf_ring)); 377 for (i = 0; i < adapter->num_rx_queues; i++) { 378 rx_ring[i].count = new_rx_count; 379 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 380 if (err) { 381 while (i) { 382 i--; 383 ixgbevf_free_rx_resources(adapter, 384 &rx_ring[i]); 385 } 386 goto err_rx_ring_setup; 387 } 388 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; 389 } 390 391 /* 392 * Only switch to new rings if all the prior allocations 393 * and ring setups have succeeded. 394 */ 395 kfree(adapter->tx_ring); 396 adapter->tx_ring = tx_ring; 397 adapter->tx_ring_count = new_tx_count; 398 399 kfree(adapter->rx_ring); 400 adapter->rx_ring = rx_ring; 401 adapter->rx_ring_count = new_rx_count; 402 403 /* success! */ 404 ixgbevf_up(adapter); 405 406 goto clear_reset; 407 408 err_rx_ring_setup: 409 for(i = 0; i < adapter->num_tx_queues; i++) 410 ixgbevf_free_tx_resources(adapter, &tx_ring[i]); 411 412 err_tx_ring_setup: 413 kfree(rx_ring); 414 415 err_rx_setup: 416 kfree(tx_ring); 417 418 clear_reset: 419 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 420 return err; 421 } 422 423 static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) 424 { 425 switch (stringset) { 426 case ETH_SS_TEST: 427 return IXGBE_TEST_LEN; 428 case ETH_SS_STATS: 429 return IXGBE_GLOBAL_STATS_LEN; 430 default: 431 return -EINVAL; 432 } 433 } 434 435 static void ixgbevf_get_ethtool_stats(struct net_device *netdev, 436 struct ethtool_stats *stats, u64 *data) 437 { 438 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 439 int i; 440 441 ixgbevf_update_stats(adapter); 442 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 443 char *p = (char *)adapter + 444 ixgbe_gstrings_stats[i].stat_offset; 445 char *b = (char *)adapter + 446 ixgbe_gstrings_stats[i].base_stat_offset; 447 char *r = (char *)adapter + 448 ixgbe_gstrings_stats[i].saved_reset_offset; 449 data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == 450 sizeof(u64)) ? *(u64 *)p : *(u32 *)p) - 451 ((ixgbe_gstrings_stats[i].sizeof_stat == 452 sizeof(u64)) ? *(u64 *)b : *(u32 *)b) + 453 ((ixgbe_gstrings_stats[i].sizeof_stat == 454 sizeof(u64)) ? *(u64 *)r : *(u32 *)r); 455 } 456 } 457 458 static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, 459 u8 *data) 460 { 461 char *p = (char *)data; 462 int i; 463 464 switch (stringset) { 465 case ETH_SS_TEST: 466 memcpy(data, *ixgbe_gstrings_test, 467 IXGBE_TEST_LEN * ETH_GSTRING_LEN); 468 break; 469 case ETH_SS_STATS: 470 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 471 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 472 ETH_GSTRING_LEN); 473 p += ETH_GSTRING_LEN; 474 } 475 break; 476 } 477 } 478 479 static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data) 480 { 481 struct ixgbe_hw *hw = &adapter->hw; 482 bool link_up; 483 u32 link_speed = 0; 484 *data = 0; 485 486 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 487 if (!link_up) 488 *data = 1; 489 490 return *data; 491 } 492 493 /* ethtool register test data */ 494 struct ixgbevf_reg_test { 495 u16 reg; 496 u8 array_len; 497 u8 test_type; 498 u32 mask; 499 u32 write; 500 }; 501 502 /* In the hardware, registers are laid out either singly, in arrays 503 * spaced 0x40 bytes apart, or in contiguous tables. We assume 504 * most tests take place on arrays or single registers (handled 505 * as a single-element array) and special-case the tables. 506 * Table tests are always pattern tests. 507 * 508 * We also make provision for some required setup steps by specifying 509 * registers to be written without any read-back testing. 510 */ 511 512 #define PATTERN_TEST 1 513 #define SET_READ_TEST 2 514 #define WRITE_NO_TEST 3 515 #define TABLE32_TEST 4 516 #define TABLE64_TEST_LO 5 517 #define TABLE64_TEST_HI 6 518 519 /* default VF register test */ 520 static const struct ixgbevf_reg_test reg_test_vf[] = { 521 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 522 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 523 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 524 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 525 { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 526 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, 527 { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 528 { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 529 { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, 530 { 0, 0, 0, 0 } 531 }; 532 533 static const u32 register_test_patterns[] = { 534 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF 535 }; 536 537 #define REG_PATTERN_TEST(R, M, W) \ 538 { \ 539 u32 pat, val, before; \ 540 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ 541 before = readl(adapter->hw.hw_addr + R); \ 542 writel((register_test_patterns[pat] & W), \ 543 (adapter->hw.hw_addr + R)); \ 544 val = readl(adapter->hw.hw_addr + R); \ 545 if (val != (register_test_patterns[pat] & W & M)) { \ 546 hw_dbg(&adapter->hw, \ 547 "pattern test reg %04X failed: got " \ 548 "0x%08X expected 0x%08X\n", \ 549 R, val, (register_test_patterns[pat] & W & M)); \ 550 *data = R; \ 551 writel(before, adapter->hw.hw_addr + R); \ 552 return 1; \ 553 } \ 554 writel(before, adapter->hw.hw_addr + R); \ 555 } \ 556 } 557 558 #define REG_SET_AND_CHECK(R, M, W) \ 559 { \ 560 u32 val, before; \ 561 before = readl(adapter->hw.hw_addr + R); \ 562 writel((W & M), (adapter->hw.hw_addr + R)); \ 563 val = readl(adapter->hw.hw_addr + R); \ 564 if ((W & M) != (val & M)) { \ 565 pr_err("set/check reg %04X test failed: got 0x%08X expected " \ 566 "0x%08X\n", R, (val & M), (W & M)); \ 567 *data = R; \ 568 writel(before, (adapter->hw.hw_addr + R)); \ 569 return 1; \ 570 } \ 571 writel(before, (adapter->hw.hw_addr + R)); \ 572 } 573 574 static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) 575 { 576 const struct ixgbevf_reg_test *test; 577 u32 i; 578 579 test = reg_test_vf; 580 581 /* 582 * Perform the register test, looping through the test table 583 * until we either fail or reach the null entry. 584 */ 585 while (test->reg) { 586 for (i = 0; i < test->array_len; i++) { 587 switch (test->test_type) { 588 case PATTERN_TEST: 589 REG_PATTERN_TEST(test->reg + (i * 0x40), 590 test->mask, 591 test->write); 592 break; 593 case SET_READ_TEST: 594 REG_SET_AND_CHECK(test->reg + (i * 0x40), 595 test->mask, 596 test->write); 597 break; 598 case WRITE_NO_TEST: 599 writel(test->write, 600 (adapter->hw.hw_addr + test->reg) 601 + (i * 0x40)); 602 break; 603 case TABLE32_TEST: 604 REG_PATTERN_TEST(test->reg + (i * 4), 605 test->mask, 606 test->write); 607 break; 608 case TABLE64_TEST_LO: 609 REG_PATTERN_TEST(test->reg + (i * 8), 610 test->mask, 611 test->write); 612 break; 613 case TABLE64_TEST_HI: 614 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 615 test->mask, 616 test->write); 617 break; 618 } 619 } 620 test++; 621 } 622 623 *data = 0; 624 return *data; 625 } 626 627 static void ixgbevf_diag_test(struct net_device *netdev, 628 struct ethtool_test *eth_test, u64 *data) 629 { 630 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 631 bool if_running = netif_running(netdev); 632 633 set_bit(__IXGBEVF_TESTING, &adapter->state); 634 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 635 /* Offline tests */ 636 637 hw_dbg(&adapter->hw, "offline testing starting\n"); 638 639 /* Link test performed before hardware reset so autoneg doesn't 640 * interfere with test result */ 641 if (ixgbevf_link_test(adapter, &data[1])) 642 eth_test->flags |= ETH_TEST_FL_FAILED; 643 644 if (if_running) 645 /* indicate we're in test mode */ 646 dev_close(netdev); 647 else 648 ixgbevf_reset(adapter); 649 650 hw_dbg(&adapter->hw, "register testing starting\n"); 651 if (ixgbevf_reg_test(adapter, &data[0])) 652 eth_test->flags |= ETH_TEST_FL_FAILED; 653 654 ixgbevf_reset(adapter); 655 656 clear_bit(__IXGBEVF_TESTING, &adapter->state); 657 if (if_running) 658 dev_open(netdev); 659 } else { 660 hw_dbg(&adapter->hw, "online testing starting\n"); 661 /* Online tests */ 662 if (ixgbevf_link_test(adapter, &data[1])) 663 eth_test->flags |= ETH_TEST_FL_FAILED; 664 665 /* Online tests aren't run; pass by default */ 666 data[0] = 0; 667 668 clear_bit(__IXGBEVF_TESTING, &adapter->state); 669 } 670 msleep_interruptible(4 * 1000); 671 } 672 673 static int ixgbevf_nway_reset(struct net_device *netdev) 674 { 675 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 676 677 if (netif_running(netdev)) { 678 if (!adapter->dev_closed) 679 ixgbevf_reinit_locked(adapter); 680 } 681 682 return 0; 683 } 684 685 static const struct ethtool_ops ixgbevf_ethtool_ops = { 686 .get_settings = ixgbevf_get_settings, 687 .get_drvinfo = ixgbevf_get_drvinfo, 688 .get_regs_len = ixgbevf_get_regs_len, 689 .get_regs = ixgbevf_get_regs, 690 .nway_reset = ixgbevf_nway_reset, 691 .get_link = ethtool_op_get_link, 692 .get_ringparam = ixgbevf_get_ringparam, 693 .set_ringparam = ixgbevf_set_ringparam, 694 .get_msglevel = ixgbevf_get_msglevel, 695 .set_msglevel = ixgbevf_set_msglevel, 696 .self_test = ixgbevf_diag_test, 697 .get_sset_count = ixgbevf_get_sset_count, 698 .get_strings = ixgbevf_get_strings, 699 .get_ethtool_stats = ixgbevf_get_ethtool_stats, 700 }; 701 702 void ixgbevf_set_ethtool_ops(struct net_device *netdev) 703 { 704 SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops); 705 } 706