// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

/* ethtool support for igc */
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/mdio.h>

#include "igc.h"
#include "igc_diag.h"
#include "igc_tsn.h"

/* forward declaration */
struct igc_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	int sizeof_stat;			/* counter width: sizeof(u32) or sizeof(u64) */
	int stat_offset;			/* byte offset of the counter in its container */
};

/* Describe one adapter-level counter (offsets relative to struct igc_adapter) */
#define IGC_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(struct igc_adapter, _stat), \
	.stat_offset = offsetof(struct igc_adapter, _stat) \
}

/* Order here is ABI: it must match the values emitted by get_ethtool_stats */
static const struct igc_stats igc_gstrings_stats[] = {
	IGC_STAT("rx_packets", stats.gprc),
	IGC_STAT("tx_packets", stats.gptc),
	IGC_STAT("rx_bytes", stats.gorc),
	IGC_STAT("tx_bytes", stats.gotc),
	IGC_STAT("rx_broadcast", stats.bprc),
	IGC_STAT("tx_broadcast", stats.bptc),
	IGC_STAT("rx_multicast", stats.mprc),
	IGC_STAT("tx_multicast", stats.mptc),
	IGC_STAT("multicast", stats.mprc),
	IGC_STAT("collisions", stats.colc),
	IGC_STAT("rx_crc_errors", stats.crcerrs),
	IGC_STAT("rx_no_buffer_count", stats.rnbc),
	IGC_STAT("rx_missed_errors", stats.mpc),
	IGC_STAT("tx_aborted_errors", stats.ecol),
	IGC_STAT("tx_carrier_errors", stats.tncrs),
	IGC_STAT("tx_window_errors", stats.latecol),
	IGC_STAT("tx_abort_late_coll", stats.latecol),
	IGC_STAT("tx_deferred_ok", stats.dc),
	IGC_STAT("tx_single_coll_ok", stats.scc),
	IGC_STAT("tx_multi_coll_ok", stats.mcc),
	IGC_STAT("tx_timeout_count", tx_timeout_count),
	IGC_STAT("rx_long_length_errors", stats.roc),
	IGC_STAT("rx_short_length_errors", stats.ruc),
	IGC_STAT("rx_align_errors", stats.algnerrc),
	IGC_STAT("tx_tcp_seg_good", stats.tsctc),
	IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
	IGC_STAT("rx_flow_control_xon", stats.xonrxc),
	IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
	IGC_STAT("tx_flow_control_xon", stats.xontxc),
	IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
	IGC_STAT("rx_long_byte_count", stats.gorc),
	IGC_STAT("tx_dma_out_of_sync", stats.doosync),
	IGC_STAT("tx_smbus", stats.mgptc),
	IGC_STAT("rx_smbus", stats.mgprc),
	IGC_STAT("dropped_smbus", stats.mgpdc),
	IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
	IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
	IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
	IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
	IGC_STAT("tx_lpi_counter", stats.tlpic),
	IGC_STAT("rx_lpi_counter", stats.rlpic),
	IGC_STAT("qbv_config_change_errors", qbv_config_change_errors),
};

/* Describe one netdev-level counter taken from struct rtnl_link_stats64 */
#define IGC_NETDEV_STAT(_net_stat) { \
	.stat_string = __stringify(_net_stat), \
	.sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \
	.stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
}

static const struct igc_stats igc_gstrings_net_stats[] = {
	IGC_NETDEV_STAT(rx_errors),
	IGC_NETDEV_STAT(tx_errors),
	IGC_NETDEV_STAT(tx_dropped),
	IGC_NETDEV_STAT(rx_length_errors),
	IGC_NETDEV_STAT(rx_over_errors),
	IGC_NETDEV_STAT(rx_frame_errors),
	IGC_NETDEV_STAT(rx_fifo_errors),
	IGC_NETDEV_STAT(tx_fifo_errors),
	IGC_NETDEV_STAT(tx_heartbeat_errors)
};

/* Indices into igc_gstrings_test for ethtool self-test results */
enum igc_diagnostics_results {
	TEST_REG = 0,
	TEST_EEP,
	TEST_IRQ,
	TEST_LOOP,
	TEST_LINK
};

static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
	[TEST_REG] = "Register test (offline)",
	[TEST_EEP] = "Eeprom test (offline)",
	[TEST_IRQ] = "Interrupt test (offline)",
	[TEST_LOOP] = "Loopback test (offline)",
	[TEST_LINK] = "Link test (on/offline)"
};

#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN)

#define IGC_GLOBAL_STATS_LEN \
	(sizeof(igc_gstrings_stats) / sizeof(struct igc_stats))
#define IGC_NETDEV_STATS_LEN \
	(sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats))
#define IGC_RX_QUEUE_STATS_LEN \
	(sizeof(struct igc_rx_queue_stats) / sizeof(u64))
#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
/* Per-queue stats count depends on the runtime queue configuration, so this
 * macro expects a local "netdev" to be in scope where it is expanded.
 */
#define IGC_QUEUE_STATS_LEN \
	((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \
	  IGC_RX_QUEUE_STATS_LEN) + \
	 (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \
	  IGC_TX_QUEUE_STATS_LEN))
#define IGC_STATS_LEN \
	(IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)

/* Private flag bits; order must match igc_priv_flags_strings[] below */
#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
#define IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO BIT(1)
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
	"legacy-rx",
	"reverse-tsn-txq-prio",
};

#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)

/* Report driver name, NVM/gPHY firmware versions and PCI bus info */
static void igc_ethtool_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *drvinfo)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u16 nvm_version = 0;
	u16 gphy_version;

	strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));

	/* NVM image version is reported as firmware version for i225 device */
	hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version);

	/* gPHY firmware version is reported as PHY FW version */
	gphy_version = igc_read_phy_fw_version(hw);

	scnprintf(adapter->fw_version,
		  sizeof(adapter->fw_version),
		  "%x:%x",
		  nvm_version,
		  gphy_version);

	strscpy(drvinfo->fw_version, adapter->fw_version,
		sizeof(drvinfo->fw_version));

	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
}

static int igc_ethtool_get_regs_len(struct net_device *netdev)
{
	return IGC_REGS_LEN * sizeof(u32);
}

/* Snapshot device registers for "ethtool -d"; buffer layout is ABI */
static void igc_ethtool_get_regs(struct net_device *netdev,
				 struct ethtool_regs *regs, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IGC_REGS_LEN * sizeof(u32));

	/* version: dump format 2, plus silicon revision and device id */
	regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = rd32(IGC_CTRL);
	regs_buff[1] = rd32(IGC_STATUS);
	regs_buff[2] = rd32(IGC_CTRL_EXT);
	regs_buff[3] = rd32(IGC_MDIC);
	regs_buff[4] = rd32(IGC_CONNSW);

	/* NVM Register */
	regs_buff[5] = rd32(IGC_EECD);

	/* Interrupt */
	/* Reading EICS for EICR because they read the
	 * same but EICS does not clear on read
	 */
	regs_buff[6] = rd32(IGC_EICS);
	regs_buff[7] = rd32(IGC_EICS);
	regs_buff[8] = rd32(IGC_EIMS);
	regs_buff[9] = rd32(IGC_EIMC);
	regs_buff[10] = rd32(IGC_EIAC);
	regs_buff[11] = rd32(IGC_EIAM);
	/* Reading ICS for ICR because they read the
	 * same but ICS does not clear on read
	 */
	regs_buff[12] = rd32(IGC_ICS);
	regs_buff[13] = rd32(IGC_ICS);
	regs_buff[14] = rd32(IGC_IMS);
	regs_buff[15] = rd32(IGC_IMC);
	regs_buff[16] = rd32(IGC_IAC);
	regs_buff[17] = rd32(IGC_IAM);

	/* Flow Control */
	regs_buff[18] = rd32(IGC_FCAL);
	regs_buff[19] = rd32(IGC_FCAH);
	regs_buff[20] = rd32(IGC_FCTTV);
	regs_buff[21] = rd32(IGC_FCRTL);
	regs_buff[22] = rd32(IGC_FCRTH);
	regs_buff[23] = rd32(IGC_FCRTV);

	/* Receive */
	regs_buff[24] = rd32(IGC_RCTL);
	regs_buff[25] = rd32(IGC_RXCSUM);
	regs_buff[26] = rd32(IGC_RLPML);
	regs_buff[27] = rd32(IGC_RFCTL);

	/* Transmit */
	regs_buff[28] = rd32(IGC_TCTL);
	regs_buff[29] = rd32(IGC_TIPG);

	/* Wake Up */

	/* MAC */

	/* Statistics */
	regs_buff[30] = adapter->stats.crcerrs;
	regs_buff[31] = adapter->stats.algnerrc;
	regs_buff[32] = adapter->stats.symerrs;
	regs_buff[33] = adapter->stats.rxerrc;
	regs_buff[34] = adapter->stats.mpc;
	regs_buff[35] = adapter->stats.scc;
	regs_buff[36] = adapter->stats.ecol;
	regs_buff[37] = adapter->stats.mcc;
	regs_buff[38] = adapter->stats.latecol;
	regs_buff[39] = adapter->stats.colc;
	regs_buff[40] = adapter->stats.dc;
	regs_buff[41] = adapter->stats.tncrs;
	regs_buff[42] = adapter->stats.sec;
	regs_buff[43] = adapter->stats.htdpmc;
	regs_buff[44] = adapter->stats.rlec;
	regs_buff[45] = adapter->stats.xonrxc;
	regs_buff[46] = adapter->stats.xontxc;
	regs_buff[47] = adapter->stats.xoffrxc;
	regs_buff[48] = adapter->stats.xofftxc;
	regs_buff[49] = adapter->stats.fcruc;
	regs_buff[50] = adapter->stats.prc64;
	regs_buff[51] = adapter->stats.prc127;
	regs_buff[52] = adapter->stats.prc255;
	regs_buff[53] = adapter->stats.prc511;
	regs_buff[54] = adapter->stats.prc1023;
	regs_buff[55] = adapter->stats.prc1522;
	regs_buff[56] = adapter->stats.gprc;
	regs_buff[57] = adapter->stats.bprc;
	regs_buff[58] = adapter->stats.mprc;
	regs_buff[59] = adapter->stats.gptc;
	regs_buff[60] = adapter->stats.gorc;
	regs_buff[61] = adapter->stats.gotc;
	regs_buff[62] = adapter->stats.rnbc;
	regs_buff[63] = adapter->stats.ruc;
	regs_buff[64] = adapter->stats.rfc;
	regs_buff[65] = adapter->stats.roc;
	regs_buff[66] = adapter->stats.rjc;
	regs_buff[67] = adapter->stats.mgprc;
	regs_buff[68] = adapter->stats.mgpdc;
	regs_buff[69] = adapter->stats.mgptc;
	regs_buff[70] = adapter->stats.tor;
	regs_buff[71] = adapter->stats.tot;
	regs_buff[72] = adapter->stats.tpr;
	regs_buff[73] = adapter->stats.tpt;
	regs_buff[74] = adapter->stats.ptc64;
	regs_buff[75] = adapter->stats.ptc127;
	regs_buff[76] = adapter->stats.ptc255;
	regs_buff[77] = adapter->stats.ptc511;
	regs_buff[78] = adapter->stats.ptc1023;
	regs_buff[79] = adapter->stats.ptc1522;
	regs_buff[80] = adapter->stats.mptc;
	regs_buff[81] = adapter->stats.bptc;
	regs_buff[82] = adapter->stats.tsctc;
	regs_buff[83] = adapter->stats.iac;
	regs_buff[84] = adapter->stats.rpthc;
	regs_buff[85] = adapter->stats.hgptc;
	regs_buff[86] = adapter->stats.hgorc;
	regs_buff[87] = adapter->stats.hgotc;
	regs_buff[88] = adapter->stats.lenerrs;
	regs_buff[89] = adapter->stats.scvpc;
	regs_buff[90] = adapter->stats.hrmpc;

	for (i = 0; i < 4; i++)
		regs_buff[91 + i] = rd32(IGC_SRRCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[95 + i] = rd32(IGC_PSRTYPE(i));
	for (i = 0; i < 4; i++)
		regs_buff[99 + i] = rd32(IGC_RDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[103 + i] = rd32(IGC_RDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[107 + i] = rd32(IGC_RDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[111 + i] = rd32(IGC_RDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[115 + i] = rd32(IGC_RDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[119 + i] = rd32(IGC_RXDCTL(i));

	for (i = 0; i < 10; i++)
		regs_buff[123 + i] = rd32(IGC_EITR(i));
	/* NOTE(review): the RAL/RAH bases below (139/145) overlap each other
	 * and the TDBAL block that follows; this is the known quirk referred
	 * to by the "XXX" comment further down and is kept for dump-layout
	 * ABI compatibility — do not "fix" the indices.
	 */
	for (i = 0; i < 16; i++)
		regs_buff[139 + i] = rd32(IGC_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[145 + i] = rd32(IGC_RAH(i));

	for (i = 0; i < 4; i++)
		regs_buff[149 + i] = rd32(IGC_TDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[152 + i] = rd32(IGC_TDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[156 + i] = rd32(IGC_TDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[160 + i] = rd32(IGC_TDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[164 + i] = rd32(IGC_TDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[168 + i] = rd32(IGC_TXDCTL(i));

	/* XXX: Due to a bug few lines above, RAL and RAH registers are
	 * overwritten. To preserve the ABI, we write these registers again in
	 * regs_buff.
	 */
	for (i = 0; i < 16; i++)
		regs_buff[172 + i] = rd32(IGC_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[188 + i] = rd32(IGC_RAH(i));

	regs_buff[204] = rd32(IGC_VLANPQF);

	for (i = 0; i < 8; i++)
		regs_buff[205 + i] = rd32(IGC_ETQF(i));

	regs_buff[213] = adapter->stats.tlpic;
	regs_buff[214] = adapter->stats.rlpic;
}

/* Report Wake-on-LAN capabilities and the currently enabled wake options */
static void igc_ethtool_get_wol(struct net_device *netdev,
				struct ethtool_wolinfo *wol)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	wol->wolopts = 0;

	if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
		return;

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC |
			 WAKE_PHY;

	/* apply any specific unsupported masks here */
	switch (adapter->hw.device_id) {
	default:
		break;
	}

	if (adapter->wol & IGC_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IGC_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IGC_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IGC_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
	if (adapter->wol & IGC_WUFC_LNKC)
		wol->wolopts |= WAKE_PHY;
}

/* Configure Wake-on-LAN; wake options this part cannot do are rejected */
static int igc_ethtool_set_wol(struct net_device *netdev,
			       struct ethtool_wolinfo *wol)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
		return -EOPNOTSUPP;

	if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
		return wol->wolopts ?
		       -EOPNOTSUPP : 0;

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IGC_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IGC_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IGC_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IGC_WUFC_MAG;
	if (wol->wolopts & WAKE_PHY)
		adapter->wol |= IGC_WUFC_LNKC;
	/* arm (or disarm) the PCI device as a wakeup source accordingly */
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

/* Return the driver message level bitmap (netif_msg_*) */
static u32 igc_ethtool_get_msglevel(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

/* Restart autonegotiation by reinitializing the interface if it is up */
static int igc_ethtool_nway_reset(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		igc_reinit_locked(adapter);
	return 0;
}

/* Return the current link state, forcing a fresh PHY query when carrier
 * is off
 */
static u32 igc_ethtool_get_link(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_mac_info *mac = &adapter->hw.mac;

	/* If the link is not reported up to netdev, interrupts are disabled,
	 * and so the physical link state may have changed since we last
	 * looked. Set get_link_status to make sure that the true link
	 * state is interrogated, rather than pulling a cached and possibly
	 * stale link state from the driver.
	 */
	if (!netif_carrier_ok(netdev))
		mac->get_link_status = 1;

	return igc_has_link(adapter);
}

/* EEPROM size in bytes: the NVM is word (16-bit) addressable */
static int igc_ethtool_get_eeprom_len(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	return adapter->hw.nvm.word_size * 2;
}

/* Read a byte range from the NVM/EEPROM for "ethtool -e" */
static int igc_ethtool_get_eeprom(struct net_device *netdev,
				  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	int first_word, last_word;
	u16 *eeprom_buff;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	/* convert the requested byte range to an inclusive word range */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
				    GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->nvm.type == igc_nvm_eeprom_spi) {
		/* SPI parts support a single multi-word read */
		ret_val = hw->nvm.ops.read(hw, first_word,
					   last_word - first_word + 1,
					   eeprom_buff);
	} else {
		/* otherwise read word by word, stopping on the first error */
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
						   &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	/* skip the leading byte when the request started on an odd offset */
	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

/* Write a byte range to the NVM/EEPROM for "ethtool -E" */
static int igc_ethtool_set_eeprom(struct net_device *netdev,
				  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	int max_len, first_word, last_word, ret_val = 0;
	u16 *eeprom_buff;
	void *ptr;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	if (hw->mac.type >= igc_i225 &&
	    !igc_get_flash_presence_i225(hw)) {
		/* no external flash: NVM is not writable on this part */
		return -EOPNOTSUPP;
	}

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->nvm.word_size * 2;

	/* inclusive word range touched by the byte-oriented request */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->nvm.ops.read(hw, first_word, 1,
					   &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) {
		/* need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->nvm.ops.read(hw, last_word, 1,
					   &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->nvm.ops.write(hw, first_word,
				    last_word - first_word + 1, eeprom_buff);

	/* Update the checksum if nvm write succeeded */
	if (ret_val == 0)
		hw->nvm.ops.update(hw);

	kfree(eeprom_buff);
	return ret_val;
}

/* Report descriptor ring limits and current ring sizes */
static void
igc_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ring,
			  struct kernel_ethtool_ringparam *kernel_ering,
			  struct netlink_ext_ack *extack)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IGC_MAX_RXD;
	ring->tx_max_pending = IGC_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

static int
igc_ethtool_set_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ring,
			  struct kernel_ethtool_ringparam *kernel_ering,
			  struct netlink_ext_ack *extack)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_ring *temp_ring;
	u16 new_rx_count, new_tx_count;
	int i, err = 0;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* clamp requested counts to hw limits and round to the required
	 * descriptor multiple
	 */
	new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD);
	new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD);
	new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if (new_tx_count == adapter->tx_ring_count &&
	    new_rx_count == adapter->rx_ring_count) {
		/* nothing to do */
		return 0;
	}

	/* serialize against other reset/reconfig paths */
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		/* interface down: no resources allocated, just store counts */
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* one scratch array big enough for whichever queue set is larger */
	if (adapter->num_tx_queues > adapter->num_rx_queues)
		temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
					       adapter->num_tx_queues));
	else
		temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
					       adapter->num_rx_queues));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igc_down(adapter);

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct igc_ring));

			temp_ring[i].count = new_tx_count;
			err = igc_setup_tx_resources(&temp_ring[i]);
			if (err) {
				/* unwind the rings set up so far */
				while (i) {
					i--;
					igc_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			igc_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct igc_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct igc_ring));

			temp_ring[i].count = new_rx_count;
			err = igc_setup_rx_resources(&temp_ring[i]);
			if (err) {
				/* unwind the rings set up so far */
				while (i) {
					i--;
					igc_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			igc_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct igc_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}
err_setup:
	igc_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGC_RESETTING, &adapter->state);
	return err;
}

/* Report flow control (pause frame) configuration */
static void igc_ethtool_get_pauseparam(struct net_device *netdev,
				       struct ethtool_pauseparam *pause)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ?
		 AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (hw->fc.current_mode == igc_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == igc_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == igc_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/* Configure flow control; the link is reset/renegotiated to apply it */
static int igc_ethtool_set_pauseparam(struct net_device *netdev,
				      struct ethtool_pauseparam *pause)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	/* serialize against other reset/reconfig paths */
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = igc_fc_default;
		if (netif_running(adapter->netdev)) {
			igc_down(adapter);
			igc_up(adapter);
		} else {
			igc_reset(adapter);
		}
	} else {
		/* forced mode: map the rx/tx pause bits to an fc mode */
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = igc_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = igc_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = igc_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = igc_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;

		retval = ((hw->phy.media_type == igc_media_type_copper) ?
758 igc_force_mac_fc(hw) : igc_setup_link(hw)); 759 } 760 761 clear_bit(__IGC_RESETTING, &adapter->state); 762 return retval; 763 } 764 765 static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, 766 u8 *data) 767 { 768 struct igc_adapter *adapter = netdev_priv(netdev); 769 u8 *p = data; 770 int i; 771 772 switch (stringset) { 773 case ETH_SS_TEST: 774 memcpy(data, *igc_gstrings_test, 775 IGC_TEST_LEN * ETH_GSTRING_LEN); 776 break; 777 case ETH_SS_STATS: 778 for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) 779 ethtool_puts(&p, igc_gstrings_stats[i].stat_string); 780 for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) 781 ethtool_puts(&p, igc_gstrings_net_stats[i].stat_string); 782 for (i = 0; i < adapter->num_tx_queues; i++) { 783 ethtool_sprintf(&p, "tx_queue_%u_packets", i); 784 ethtool_sprintf(&p, "tx_queue_%u_bytes", i); 785 ethtool_sprintf(&p, "tx_queue_%u_restart", i); 786 } 787 for (i = 0; i < adapter->num_rx_queues; i++) { 788 ethtool_sprintf(&p, "rx_queue_%u_packets", i); 789 ethtool_sprintf(&p, "rx_queue_%u_bytes", i); 790 ethtool_sprintf(&p, "rx_queue_%u_drops", i); 791 ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); 792 ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); 793 } 794 /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ 795 break; 796 case ETH_SS_PRIV_FLAGS: 797 memcpy(data, igc_priv_flags_strings, 798 IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); 799 break; 800 } 801 } 802 803 static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) 804 { 805 switch (sset) { 806 case ETH_SS_STATS: 807 return IGC_STATS_LEN; 808 case ETH_SS_TEST: 809 return IGC_TEST_LEN; 810 case ETH_SS_PRIV_FLAGS: 811 return IGC_PRIV_FLAGS_STR_LEN; 812 default: 813 return -ENOTSUPP; 814 } 815 } 816 817 static void igc_ethtool_get_stats(struct net_device *netdev, 818 struct ethtool_stats *stats, u64 *data) 819 { 820 struct igc_adapter *adapter = netdev_priv(netdev); 821 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 822 unsigned int 
	    start;
	struct igc_ring *ring;
	int i, j;
	char *p;

	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);

	/* adapter-level counters: width decided per entry (u32 vs u64) */
	for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
		p = (char *)adapter + igc_gstrings_stats[i].stat_offset;
		data[i] = (igc_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* netdev-level counters from the cached rtnl_link_stats64 */
	for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) {
		p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset;
		data[i] = (igc_gstrings_net_stats[j].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* per-Tx-queue counters, read under the u64_stats seqcount so a
	 * concurrent writer cannot be observed mid-update
	 */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		u64 restart2;

		ring = adapter->tx_ring[j];
		do {
			start = u64_stats_fetch_begin(&ring->tx_syncp);
			data[i] = ring->tx_stats.packets;
			data[i + 1] = ring->tx_stats.bytes;
			data[i + 2] = ring->tx_stats.restart_queue;
		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
		do {
			start = u64_stats_fetch_begin(&ring->tx_syncp2);
			restart2 = ring->tx_stats.restart_queue2;
		} while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
		/* both restart counters are reported as one value */
		data[i + 2] += restart2;

		i += IGC_TX_QUEUE_STATS_LEN;
	}
	/* per-Rx-queue counters */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		do {
			start = u64_stats_fetch_begin(&ring->rx_syncp);
			data[i] = ring->rx_stats.packets;
			data[i + 1] = ring->rx_stats.bytes;
			data[i + 2] = ring->rx_stats.drops;
			data[i + 3] = ring->rx_stats.csum_err;
			data[i + 4] = ring->rx_stats.alloc_failed;
		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
		i += IGC_RX_QUEUE_STATS_LEN;
	}
	spin_unlock(&adapter->stats64_lock);
}

/* ITR settings 1-3 are special presets; larger values store usecs << 2 */
static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter)
{
	return (adapter->rx_itr_setting <= 3) ?
		adapter->rx_itr_setting : adapter->rx_itr_setting >> 2;
}

/* Same encoding as the Rx variant: presets pass through, else usecs << 2 */
static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter)
{
	return (adapter->tx_itr_setting <= 3) ?
		adapter->tx_itr_setting : adapter->tx_itr_setting >> 2;
}

/* Report current interrupt coalescing settings in microseconds */
static int igc_ethtool_get_coalesce(struct net_device *netdev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter);
	ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter);

	return 0;
}

/* Validate and apply interrupt coalescing settings */
static int igc_ethtool_set_coalesce(struct net_device *netdev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	int i;

	/* valid values: 0, 1, 3, or IGC_MIN..IGC_MAX usecs (2 is reserved) */
	if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
	    (ec->rx_coalesce_usecs > 3 &&
	     ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
	    ec->rx_coalesce_usecs == 2)
		return -EINVAL;

	if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS ||
	    (ec->tx_coalesce_usecs > 3 &&
	     ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
	    ec->tx_coalesce_usecs == 2)
		return -EINVAL;

	/* in queue-pair mode Tx shares the Rx vector, so tx-usecs is fixed */
	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
	    ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
		return -EINVAL;
	}

	/* If ITR is disabled, disable DMAC */
	if (ec->rx_coalesce_usecs == 0) {
		if (adapter->flags & IGC_FLAG_DMAC)
			adapter->flags &= ~IGC_FLAG_DMAC;
	}

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
	else
		adapter->rx_itr_setting =
			ec->rx_coalesce_usecs << 2;

	/* convert to rate of irq's per second */
	if (adapter->flags & IGC_FLAG_QUEUE_PAIRS)
		adapter->tx_itr_setting = adapter->rx_itr_setting;
	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;

	/* push the new ITR value to every queue vector */
	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igc_q_vector *q_vector = adapter->q_vector[i];

		q_vector->tx.work_limit = adapter->tx_work_limit;
		if (q_vector->rx.ring)
			q_vector->itr_val = adapter->rx_itr_setting;
		else
			q_vector->itr_val = adapter->tx_itr_setting;
		if (q_vector->itr_val && q_vector->itr_val <= 3)
			q_vector->itr_val = IGC_START_ITR;
		q_vector->set_itr = 1;
	}

	return 0;
}

#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
#define VLAN_TCI_FULL_MASK ((__force __be16)~0)
/* Translate the stored NFC rule at fsp->location back into an
 * ethtool_rx_flow_spec; returns -EINVAL when no rule exists there.
 */
static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
				    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct igc_nfc_rule *rule = NULL;

	cmd->data = IGC_MAX_RXNFC_RULES;

	mutex_lock(&adapter->nfc_rule_lock);

	rule = igc_get_nfc_rule(adapter, fsp->location);
	if (!rule)
		goto out;

	fsp->flow_type = ETHER_FLOW;
	fsp->ring_cookie = rule->action;

	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
		fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype);
		fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
		fsp->flow_type |= FLOW_EXT;
		fsp->h_ext.vlan_etype = htons(rule->filter.vlan_etype);
		fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
		fsp->flow_type |= FLOW_EXT;
		fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
		fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask);
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
		ether_addr_copy(fsp->h_u.ether_spec.h_dest,
				rule->filter.dst_addr);
		/* MAC matches are always exact: full broadcast mask */
		eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
		ether_addr_copy(fsp->h_u.ether_spec.h_source,
				rule->filter.src_addr);
		eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) {
		fsp->flow_type |= FLOW_EXT;
		memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data));
		memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data));
	}

	mutex_unlock(&adapter->nfc_rule_lock);
	return 0;

out:
	mutex_unlock(&adapter->nfc_rule_lock);
	return -EINVAL;
}

/* Copy the locations of all installed NFC rules into rule_locs;
 * -EMSGSIZE if the caller's array (cmd->rule_cnt entries) is too small.
 */
static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter,
				     struct ethtool_rxnfc *cmd,
				     u32 *rule_locs)
{
	struct igc_nfc_rule *rule;
	int cnt = 0;

	cmd->data = IGC_MAX_RXNFC_RULES;

	mutex_lock(&adapter->nfc_rule_lock);

	list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
		if (cnt == cmd->rule_cnt) {
			mutex_unlock(&adapter->nfc_rule_lock);
			return -EMSGSIZE;
		}
		rule_locs[cnt] = rule->location;
		cnt++;
	}

	mutex_unlock(&adapter->nfc_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

/* Report which header fields feed the RSS hash for a given flow type */
static int igc_ethtool_get_rxfh_fields(struct net_device *dev,
				       struct ethtool_rxfh_fields *cmd)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	cmd->data = 0;

	/* Report default options for RSS on igc */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case UDP_V4_FLOW:
		if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case UDP_V6_FLOW:
		if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Dispatch ETHTOOL_GRX* queries to the matching helper above. */
static int igc_ethtool_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->nfc_rule_count;
		return 0;
	case ETHTOOL_GRXCLSRULE:
		return igc_ethtool_get_nfc_rule(adapter, cmd);
	case ETHTOOL_GRXCLSRLALL:
		return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs);
	default:
		return -EOPNOTSUPP;
	}
}

#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
		       IGC_FLAG_RSS_FIELD_IPV6_UDP)

/* Configure which header fields feed the RSS hash for a given flow type.
 * Only the hardware's fixed field combinations are accepted.
 */
static int igc_ethtool_set_rxfh_fields(struct net_device *dev,
				       const struct ethtool_rxfh_fields *nfc,
				       struct netlink_ext_ack *extack)
{
	struct igc_adapter *adapter = netdev_priv(dev);
	u32 flags = adapter->flags;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* TCP hashing always uses IPs and ports; nothing less */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data &
RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		/* UDP port hashing is all-or-nothing, tracked via flag */
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		/* These flow types hash on IPs only; port bits are invalid */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags != adapter->flags) {
		struct igc_hw *hw = &adapter->hw;
		u32 mrqc = rd32(IGC_MRQC);

		if ((flags & UDP_RSS_FLAGS) &&
		    !(adapter->flags & UDP_RSS_FLAGS))
			netdev_err(adapter->netdev,
				   "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags = flags;

		/* Perform hash on these packet types */
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4 |
			IGC_MRQC_RSS_FIELD_IPV4_TCP |
			IGC_MRQC_RSS_FIELD_IPV6 |
			IGC_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP |
			  IGC_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
			mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
			mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

		wr32(IGC_MRQC, mrqc);
	}

	return 0;
}

/* Translate an ethtool flow spec into the driver's internal rule
 * representation and decide whether it needs a flex filter.
 */
static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
				      const struct ethtool_rx_flow_spec *fsp)
{
	INIT_LIST_HEAD(&rule->list);

	rule->action = fsp->ring_cookie;
	rule->location = fsp->location;

	if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
		rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
		rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci);
		rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
	}

	if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
		rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto);
		/* NOTE(review): plain '=' (not '|=') discards any match flag
		 * set above (e.g. VLAN TCI) - confirm this is intentional.
		 */
		rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
	}

	/* Both source and destination address filters only support the full
	 * mask.
	 */
	if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
		rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
		ether_addr_copy(rule->filter.src_addr,
				fsp->h_u.ether_spec.h_source);
	}

	if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
		rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
		ether_addr_copy(rule->filter.dst_addr,
				fsp->h_u.ether_spec.h_dest);
	}

	/* VLAN etype matching */
	if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
		rule->filter.vlan_etype = ntohs(fsp->h_ext.vlan_etype);
		rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
	}

	/* Check for user defined data */
	if ((fsp->flow_type & FLOW_EXT) &&
	    (fsp->h_ext.data[0] || fsp->h_ext.data[1])) {
		rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA;
		memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data));
		memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
	}

	/* The i225/i226 has various different filters. Flex filters provide a
	 * way to match up to the first 128 bytes of a packet. Use them for:
	 *   a) For specific user data
	 *   b) For VLAN EtherType
	 *   c) For full TCI match
	 *   d) Or in case multiple filter criteria are set
	 *
	 * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
	 */
	if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
	    (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
	    ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
	     rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
	    (rule->filter.match_flags & (rule->filter.match_flags - 1)))
		rule->flex = true;
	else
		rule->flex = false;
}

/**
 * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid
 * @adapter: Pointer to adapter
 * @rule: Rule under evaluation
 *
 * A rule with no match flags set at all is rejected.  Rules with multiple
 * match criteria are accepted here; igc_ethtool_init_nfc_rule() routes them
 * to flex filters.
 *
 * Also, if there is already another rule with the same filter in a different
 * location, @rule is considered invalid.
 *
 * Context: Expects adapter->nfc_rule_lock to be held by caller.
 *
 * Return: 0 in case of success, negative errno code otherwise.
1302 */ 1303 static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, 1304 struct igc_nfc_rule *rule) 1305 { 1306 struct net_device *dev = adapter->netdev; 1307 u8 flags = rule->filter.match_flags; 1308 struct igc_nfc_rule *tmp; 1309 1310 if (!flags) { 1311 netdev_dbg(dev, "Rule with no match\n"); 1312 return -EINVAL; 1313 } 1314 1315 list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { 1316 if (!memcmp(&rule->filter, &tmp->filter, 1317 sizeof(rule->filter)) && 1318 tmp->location != rule->location) { 1319 netdev_dbg(dev, "Rule already exists\n"); 1320 return -EEXIST; 1321 } 1322 } 1323 1324 return 0; 1325 } 1326 1327 static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, 1328 struct ethtool_rxnfc *cmd) 1329 { 1330 struct net_device *netdev = adapter->netdev; 1331 struct ethtool_rx_flow_spec *fsp = 1332 (struct ethtool_rx_flow_spec *)&cmd->fs; 1333 struct igc_nfc_rule *rule, *old_rule; 1334 int err; 1335 1336 if (!(netdev->hw_features & NETIF_F_NTUPLE)) { 1337 netdev_dbg(netdev, "N-tuple filters disabled\n"); 1338 return -EOPNOTSUPP; 1339 } 1340 1341 if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { 1342 netdev_dbg(netdev, "Only ethernet flow type is supported\n"); 1343 return -EOPNOTSUPP; 1344 } 1345 1346 if (fsp->ring_cookie >= adapter->num_rx_queues) { 1347 netdev_dbg(netdev, "Invalid action\n"); 1348 return -EINVAL; 1349 } 1350 1351 /* There are two ways to match the VLAN TCI: 1352 * 1. Match on PCP field and use vlan prio filter for it 1353 * 2. Match on complete TCI field and use flex filter for it 1354 */ 1355 if ((fsp->flow_type & FLOW_EXT) && 1356 fsp->m_ext.vlan_tci && 1357 fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) && 1358 fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) { 1359 netdev_dbg(netdev, "VLAN mask not supported\n"); 1360 return -EOPNOTSUPP; 1361 } 1362 1363 /* VLAN EtherType can only be matched by full mask. 
 */
	if ((fsp->flow_type & FLOW_EXT) &&
	    fsp->m_ext.vlan_etype &&
	    fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
		netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
		return -EOPNOTSUPP;
	}

	if (fsp->location >= IGC_MAX_RXNFC_RULES) {
		netdev_dbg(netdev, "Invalid location\n");
		return -EINVAL;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	igc_ethtool_init_nfc_rule(rule, fsp);

	mutex_lock(&adapter->nfc_rule_lock);

	err = igc_ethtool_check_nfc_rule(adapter, rule);
	if (err)
		goto err;

	/* Replace any rule previously stored at the same location */
	old_rule = igc_get_nfc_rule(adapter, fsp->location);
	if (old_rule)
		igc_del_nfc_rule(adapter, old_rule);

	err = igc_add_nfc_rule(adapter, rule);
	if (err)
		goto err;

	mutex_unlock(&adapter->nfc_rule_lock);
	return 0;

err:
	mutex_unlock(&adapter->nfc_rule_lock);
	kfree(rule);
	return err;
}

/* Remove the classification rule stored at fsp->location, if any. */
static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter,
				    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct igc_nfc_rule *rule;

	mutex_lock(&adapter->nfc_rule_lock);

	rule = igc_get_nfc_rule(adapter, fsp->location);
	if (!rule) {
		mutex_unlock(&adapter->nfc_rule_lock);
		return -EINVAL;
	}

	igc_del_nfc_rule(adapter, rule);

	mutex_unlock(&adapter->nfc_rule_lock);
	return 0;
}

/* Dispatch ETHTOOL_SRX* requests to the matching helper above. */
static int igc_ethtool_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		return igc_ethtool_add_nfc_rule(adapter, cmd);
	case ETHTOOL_SRXCLSRLDEL:
		return igc_ethtool_del_nfc_rule(adapter, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/* Write the software RSS indirection table to the hardware RETA
 * registers, packing four 8-bit entries per 32-bit register.
 */
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 reg = IGC_RETA(0);
	/* NOTE(review): shift is never changed from 0, so the
	 * "val << shift" below is effectively a no-op.
	 */
	u32 shift = 0;
	int i = 0;

	while (i < IGC_RETA_SIZE) {
		u32 val = 0;
		int j;

		/* Pack entries i..i+3, entry i in the least significant byte */
		for (j = 3; j >= 0; j--) {
			val <<= 8;
			val |= adapter->rss_indir_tbl[i + j];
		}

		wr32(reg, val << shift);
		reg += 4;
		i += 4;
	}
}

/* Report the number of entries in the RSS indirection table. */
static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev)
{
	return IGC_RETA_SIZE;
}

/* Report the RSS hash function and, if requested, the indirection table. */
static int igc_ethtool_get_rxfh(struct net_device *netdev,
				struct ethtool_rxfh_param *rxfh)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (!rxfh->indir)
		return 0;
	for (i = 0; i < IGC_RETA_SIZE; i++)
		rxfh->indir[i] = adapter->rss_indir_tbl[i];

	return 0;
}

/* Update the RSS indirection table.  Changing the hash key or hash
 * function is not supported.
 */
static int igc_ethtool_set_rxfh(struct net_device *netdev,
				struct ethtool_rxfh_param *rxfh,
				struct netlink_ext_ack *extack)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 num_queues;
	int i;

	/* We do not allow change in unsupported parameters */
	if (rxfh->key ||
	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	     rxfh->hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!rxfh->indir)
		return 0;

	num_queues = adapter->rss_queues;

	/* Verify user input.
 */
	for (i = 0; i < IGC_RETA_SIZE; i++)
		if (rxfh->indir[i] >= num_queues)
			return -EINVAL;

	for (i = 0; i < IGC_RETA_SIZE; i++)
		adapter->rss_indir_tbl[i] = rxfh->indir[i];

	igc_write_rss_indir_tbl(adapter);

	return 0;
}

/* Report the queue channel configuration. */
static void igc_ethtool_get_channels(struct net_device *netdev,
				     struct ethtool_channels *ch)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* Report maximum channels */
	ch->max_combined = igc_get_max_rss_queues(adapter);

	/* Report info for other vector */
	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	ch->combined_count = adapter->rss_queues;
}

/* Change the number of combined channels.  Only combined queue pairs are
 * supported; the device is reinitialized when the count changes.
 */
static int igc_ethtool_set_channels(struct net_device *netdev,
				    struct ethtool_channels *ch)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	unsigned int count = ch->combined_count;
	unsigned int max_combined = 0;

	/* Verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* Verify other_count is valid and has not been changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* Do not allow channel reconfiguration when mqprio is enabled */
	if (adapter->strict_priority_enable)
		return -EINVAL;

	/* Verify the number of channels doesn't exceed hw limits */
	max_combined = igc_get_max_rss_queues(adapter);
	if (count > max_combined)
		return -EINVAL;

	if (count != adapter->rss_queues) {
		adapter->rss_queues = count;
		igc_set_flag_queue_pairs(adapter, max_combined);

		/* Hardware has to reinitialize queues and interrupts to
		 * match the new configuration.
1561 */ 1562 return igc_reinit_queues(adapter); 1563 } 1564 1565 return 0; 1566 } 1567 1568 static int igc_ethtool_get_ts_info(struct net_device *dev, 1569 struct kernel_ethtool_ts_info *info) 1570 { 1571 struct igc_adapter *adapter = netdev_priv(dev); 1572 1573 if (adapter->ptp_clock) 1574 info->phc_index = ptp_clock_index(adapter->ptp_clock); 1575 1576 switch (adapter->hw.mac.type) { 1577 case igc_i225: 1578 info->so_timestamping = 1579 SOF_TIMESTAMPING_TX_SOFTWARE | 1580 SOF_TIMESTAMPING_TX_HARDWARE | 1581 SOF_TIMESTAMPING_RX_HARDWARE | 1582 SOF_TIMESTAMPING_RAW_HARDWARE; 1583 1584 info->tx_types = 1585 BIT(HWTSTAMP_TX_OFF) | 1586 BIT(HWTSTAMP_TX_ON); 1587 1588 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); 1589 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); 1590 1591 return 0; 1592 default: 1593 return -EOPNOTSUPP; 1594 } 1595 } 1596 1597 static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) 1598 { 1599 struct igc_adapter *adapter = netdev_priv(netdev); 1600 u32 priv_flags = 0; 1601 1602 if (adapter->flags & IGC_FLAG_RX_LEGACY) 1603 priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; 1604 1605 if (adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO) 1606 priv_flags |= IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO; 1607 1608 return priv_flags; 1609 } 1610 1611 static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) 1612 { 1613 struct igc_adapter *adapter = netdev_priv(netdev); 1614 unsigned int flags = adapter->flags; 1615 1616 flags &= ~(IGC_FLAG_RX_LEGACY | IGC_FLAG_TSN_REVERSE_TXQ_PRIO); 1617 if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) 1618 flags |= IGC_FLAG_RX_LEGACY; 1619 1620 if (priv_flags & IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO) 1621 flags |= IGC_FLAG_TSN_REVERSE_TXQ_PRIO; 1622 1623 if (flags != adapter->flags) { 1624 adapter->flags = flags; 1625 1626 /* reset interface to repopulate queues */ 1627 if (netif_running(netdev)) 1628 igc_reinit_locked(adapter); 1629 } 1630 1631 return 0; 1632 } 1633 1634 static int igc_ethtool_get_eee(struct net_device 
*netdev,
			       struct ethtool_keee *edata)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct igc_phy_info *phy = &hw->phy;
	u16 eee_advert, eee_lp_advert;
	u32 eeer, ret_val;

	/* EEE supported */
	linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
			 edata->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 edata->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
			 edata->supported);

	/* EEE Advertisement 1 - reg 7.60 */
	ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					 MMD_DEVADDR_SHIFT) |
				    IGC_ANEG_EEE_AB1,
				    &eee_advert);
	if (ret_val) {
		netdev_err(adapter->netdev,
			   "Failed to read IEEE 7.60 register\n");
		return -EINVAL;
	}

	if (eee_advert & IGC_EEE_1000BT_MASK)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 edata->advertised);

	if (eee_advert & IGC_EEE_100BT_MASK)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 edata->advertised);

	/* EEE Advertisement 2 - reg 7.62 */
	ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					 MMD_DEVADDR_SHIFT) |
				    IGC_ANEG_EEE_AB2,
				    &eee_advert);
	if (ret_val) {
		netdev_err(adapter->netdev,
			   "Failed to read IEEE 7.62 register\n");
		return -EINVAL;
	}

	if (eee_advert & IGC_EEE_2500BT_MASK)
		linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
				 edata->advertised);

	/* EEE Link-Partner Ability 1 - reg 7.61 */
	ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					 MMD_DEVADDR_SHIFT) |
				    IGC_ANEG_EEE_LP_AB1,
				    &eee_lp_advert);
	if (ret_val) {
		netdev_err(adapter->netdev,
			   "Failed to read IEEE 7.61 register\n");
		return -EINVAL;
	}

	if (eee_lp_advert & IGC_LP_EEE_1000BT_MASK)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 edata->lp_advertised);

	if (eee_lp_advert & IGC_LP_EEE_100BT_MASK)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 edata->lp_advertised);

	/* EEE Link-Partner Ability 2 - reg 7.63 */
	ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					 MMD_DEVADDR_SHIFT) |
				    IGC_ANEG_EEE_LP_AB2,
				    &eee_lp_advert);
	if (ret_val) {
		netdev_err(adapter->netdev,
			   "Failed to read IEEE 7.63 register\n");
		return -EINVAL;
	}

	if (eee_lp_advert & IGC_LP_EEE_2500BT_MASK)
		linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
				 edata->lp_advertised);

	eeer = rd32(IGC_EEER);

	/* EEE status on negotiated link */
	if (eeer & IGC_EEER_EEE_NEG)
		edata->eee_active = true;

	if (eeer & IGC_EEER_TX_LPI_EN)
		edata->tx_lpi_enabled = true;

	edata->eee_enabled = hw->dev_spec._base.eee_enable;

	/* Report correct negotiated EEE status for devices that
	 * wrongly report EEE at half-duplex
	 */
	if (adapter->link_duplex == HALF_DUPLEX) {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		linkmode_zero(edata->advertised);
	}

	return 0;
}

/* Enable or disable EEE; changing tx-lpi settings is not supported. */
static int igc_ethtool_set_eee(struct net_device *netdev,
			       struct ethtool_keee *edata)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct ethtool_keee eee_curr;
	s32 ret_val;

	memset(&eee_curr, 0, sizeof(struct ethtool_keee));

	ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
	if (ret_val) {
		netdev_err(netdev,
			   "Problem setting EEE advertisement options\n");
		return -EINVAL;
	}

	if (eee_curr.eee_enabled) {
		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
			netdev_err(netdev,
				   "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		/* Tx LPI timer is not implemented currently */
		if (edata->tx_lpi_timer) {
			netdev_err(netdev,
				   "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}
	} else if (!edata->eee_enabled) {
		netdev_err(netdev,
			   "Setting EEE options are not supported with EEE disabled\n");
		return -EINVAL;
	}

	if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
		hw->dev_spec._base.eee_enable = edata->eee_enabled;
		adapter->flags |= IGC_FLAG_EEE;

		/* reset link */
		if (netif_running(netdev))
			igc_reinit_locked(adapter);
		else
			igc_reset(adapter);
	}

	return 0;
}

/* Report current frame preemption (MAC merge) state. */
static int igc_ethtool_get_mm(struct net_device *netdev,
			      struct ethtool_mm_state *cmd)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_fpe_t *fpe = &adapter->fpe;

	ethtool_mmsv_get_mm(&fpe->mmsv, cmd);
	cmd->tx_min_frag_size = fpe->tx_min_frag_size;
	cmd->rx_min_frag_size = IGC_RX_MIN_FRAG_SIZE;

	return 0;
}

/* Configure frame preemption (MAC merge) parameters and apply the TSN
 * offload configuration.
 */
static int igc_ethtool_set_mm(struct net_device *netdev,
			      struct ethtool_mm_cfg *cmd,
			      struct netlink_ext_ack *extack)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_fpe_t *fpe = &adapter->fpe;

	/* Round the requested fragment size to the nearest supported value */
	fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size);
	if (fpe->tx_min_frag_size != cmd->tx_min_frag_size)
		NL_SET_ERR_MSG_MOD(extack,
				   "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)");

	/* Keep the static branch's refcount in sync with pMAC enablement */
	if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) {
		if (cmd->pmac_enabled)
			static_branch_inc(&igc_fpe_enabled);
		else
			static_branch_dec(&igc_fpe_enabled);
	}

	ethtool_mmsv_set_mm(&fpe->mmsv, cmd);

	return igc_tsn_offload_apply(adapter);
}

/**
 * igc_ethtool_get_frame_ass_error - Get the frame assembly error count.
 * @reg_value: Register value for IGC_PRMEXCPRCNT
 * Return: The count of frame assembly errors.
1834 */ 1835 static u64 igc_ethtool_get_frame_ass_error(u32 reg_value) 1836 { 1837 /* Out of order statistics */ 1838 u32 ooo_frame_cnt, ooo_frag_cnt; 1839 u32 miss_frame_frag_cnt; 1840 1841 ooo_frame_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, reg_value); 1842 ooo_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, reg_value); 1843 miss_frame_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT, 1844 reg_value); 1845 1846 return ooo_frame_cnt + ooo_frag_cnt + miss_frame_frag_cnt; 1847 } 1848 1849 static u64 igc_ethtool_get_frame_smd_error(u32 reg_value) 1850 { 1851 return FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, reg_value); 1852 } 1853 1854 static void igc_ethtool_get_mm_stats(struct net_device *dev, 1855 struct ethtool_mm_stats *stats) 1856 { 1857 struct igc_adapter *adapter = netdev_priv(dev); 1858 struct igc_hw *hw = &adapter->hw; 1859 u32 reg_value; 1860 1861 reg_value = rd32(IGC_PRMEXCPRCNT); 1862 1863 stats->MACMergeFrameAssErrorCount = igc_ethtool_get_frame_ass_error(reg_value); 1864 stats->MACMergeFrameSmdErrorCount = igc_ethtool_get_frame_smd_error(reg_value); 1865 stats->MACMergeFrameAssOkCount = rd32(IGC_PRMPTDRCNT); 1866 stats->MACMergeFragCountRx = rd32(IGC_PRMEVNTRCNT); 1867 stats->MACMergeFragCountTx = rd32(IGC_PRMEVNTTCNT); 1868 } 1869 1870 static int igc_ethtool_get_link_ksettings(struct net_device *netdev, 1871 struct ethtool_link_ksettings *cmd) 1872 { 1873 struct igc_adapter *adapter = netdev_priv(netdev); 1874 struct igc_hw *hw = &adapter->hw; 1875 u32 status; 1876 u32 speed; 1877 1878 ethtool_link_ksettings_zero_link_mode(cmd, supported); 1879 ethtool_link_ksettings_zero_link_mode(cmd, advertising); 1880 1881 /* supported link modes */ 1882 ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); 1883 ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); 1884 ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); 1885 ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); 1886 
ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full);

	/* twisted pair */
	cmd->base.port = PORT_TP;
	cmd->base.phy_address = hw->phy.addr;
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);

	/* advertising link modes */
	if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
	if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
	if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
	if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
	if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
	if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);

	/* set autoneg settings */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	/* Set pause flow control settings */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	switch (hw->fc.requested_mode) {
	case igc_fc_full:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		break;
	case igc_fc_rx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	case igc_fc_tx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	default:
		break;
	}

	/* Don't read device registers while runtime-suspended */
	status = pm_runtime_suspended(&adapter->pdev->dev) ?
		 0 : rd32(IGC_STATUS);

	if (status & IGC_STATUS_LU) {
		if (status & IGC_STATUS_SPEED_1000) {
			/* For I225, STATUS will indicate 1G speed in both
			 * 1 Gbps and 2.5 Gbps link modes.
			 * An additional bit is used
			 * to differentiate between 1 Gbps and 2.5 Gbps.
			 */
			if (hw->mac.type == igc_i225 &&
			    (status & IGC_STATUS_SPEED_2500)) {
				speed = SPEED_2500;
			} else {
				speed = SPEED_1000;
			}
		} else if (status & IGC_STATUS_SPEED_100) {
			speed = SPEED_100;
		} else {
			speed = SPEED_10;
		}
		if ((status & IGC_STATUS_FD) ||
		    hw->phy.media_type != igc_media_type_copper)
			cmd->base.duplex = DUPLEX_FULL;
		else
			cmd->base.duplex = DUPLEX_HALF;
	} else {
		speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = speed;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* MDI-X => 2; MDI =>1; Invalid =>0 */
	if (hw->phy.media_type == igc_media_type_copper)
		cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
							  ETH_TP_MDI;
	else
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;

	if (hw->phy.mdix == AUTO_ALL_MODES)
		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	else
		cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;

	return 0;
}

/* Apply link settings.  Only autoneg is supported; forcing speed/duplex
 * is rejected with an informational message.
 */
static int
igc_ethtool_set_link_ksettings(struct net_device *netdev,
			       const struct ethtool_link_ksettings *cmd)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u16 advertised = 0;

	/* When adapter in resetting mode, autoneg/speed/duplex
	 * cannot be changed
	 */
	if (igc_check_reset_block(hw)) {
		netdev_err(dev, "Cannot change link characteristics when reset is active\n");
		return -EINVAL;
	}

	/* MDI setting is only allowed when autoneg enabled because
	 * some hardware doesn't allow MDI setting when speed or
	 * duplex is forced.
	 */
	if (cmd->base.eth_tp_mdix_ctrl) {
		if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
		    cmd->base.autoneg != AUTONEG_ENABLE) {
			netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
			return -EINVAL;
		}
	}

	/* Serialize against a concurrent reset */
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
						  2500baseT_Full))
		advertised |= ADVERTISE_2500_FULL;

	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
						  1000baseT_Full))
		advertised |= ADVERTISE_1000_FULL;

	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
						  100baseT_Full))
		advertised |= ADVERTISE_100_FULL;

	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
						  100baseT_Half))
		advertised |= ADVERTISE_100_HALF;

	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
						  10baseT_Full))
		advertised |= ADVERTISE_10_FULL;

	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
						  10baseT_Half))
		advertised |= ADVERTISE_10_HALF;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		hw->phy.autoneg_advertised = advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = igc_fc_default;
	} else {
		netdev_info(dev, "Force mode currently not supported\n");
	}

	/* MDI-X => 2; MDI => 1; Auto => 3 */
	if (cmd->base.eth_tp_mdix_ctrl) {
		/* fix up the value for auto (3 => 0) as zero is mapped
		 * internally to auto
		 */
		if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
			hw->phy.mdix = AUTO_ALL_MODES;
		else
			hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
	}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		igc_down(adapter);
		igc_up(adapter);
	} else {
		igc_reset(adapter);
	}

	clear_bit(__IGC_RESETTING, &adapter->state);

	return 0;
}

/* Run ethtool self-tests.  Register, EEPROM and link tests are
 * implemented; loopback and interrupt tests are not yet.
 */
static void igc_ethtool_diag_test(struct net_device *netdev,
				  struct ethtool_test *eth_test, u64 *data)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		netdev_info(adapter->netdev, "Offline testing starting");
		set_bit(__IGC_TESTING, &adapter->state);

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (!igc_link_test(adapter, &data[TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			igc_close(netdev);
		else
			igc_reset(adapter);

		netdev_info(adapter->netdev, "Register testing starting");
		if (!igc_reg_test(adapter, &data[TEST_REG]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igc_reset(adapter);

		netdev_info(adapter->netdev, "EEPROM testing starting");
		if (!igc_eeprom_test(adapter,
&data[TEST_EEP]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igc_reset(adapter);

		/* loopback and interrupt tests
		 * will be implemented in the future
		 */
		data[TEST_LOOP] = 0;
		data[TEST_IRQ] = 0;

		clear_bit(__IGC_TESTING, &adapter->state);
		if (if_running)
			igc_open(netdev);
	} else {
		netdev_info(adapter->netdev, "Online testing starting");

		/* register, eeprom, intr and loopback tests not run online */
		data[TEST_REG] = 0;
		data[TEST_EEP] = 0;
		data[TEST_IRQ] = 0;
		data[TEST_LOOP] = 0;

		if (!igc_link_test(adapter, &data[TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
	}

	msleep_interruptible(4 * 1000);
}

/* Ethtool operation table for the igc driver. */
static const struct ethtool_ops igc_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo = igc_ethtool_get_drvinfo,
	.get_regs_len = igc_ethtool_get_regs_len,
	.get_regs = igc_ethtool_get_regs,
	.get_wol = igc_ethtool_get_wol,
	.set_wol = igc_ethtool_set_wol,
	.get_msglevel = igc_ethtool_get_msglevel,
	.set_msglevel = igc_ethtool_set_msglevel,
	.nway_reset = igc_ethtool_nway_reset,
	.get_link = igc_ethtool_get_link,
	.get_eeprom_len = igc_ethtool_get_eeprom_len,
	.get_eeprom = igc_ethtool_get_eeprom,
	.set_eeprom = igc_ethtool_set_eeprom,
	.get_ringparam = igc_ethtool_get_ringparam,
	.set_ringparam = igc_ethtool_set_ringparam,
	.get_pauseparam = igc_ethtool_get_pauseparam,
	.set_pauseparam = igc_ethtool_set_pauseparam,
	.get_strings = igc_ethtool_get_strings,
	.get_sset_count = igc_ethtool_get_sset_count,
	.get_ethtool_stats = igc_ethtool_get_stats,
	.get_coalesce = igc_ethtool_get_coalesce,
	.set_coalesce = igc_ethtool_set_coalesce,
	.get_rxnfc = igc_ethtool_get_rxnfc,
	.set_rxnfc = igc_ethtool_set_rxnfc,
	.get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
	.get_rxfh = igc_ethtool_get_rxfh,
	.set_rxfh = igc_ethtool_set_rxfh,
	.get_rxfh_fields = igc_ethtool_get_rxfh_fields,
	.set_rxfh_fields = igc_ethtool_set_rxfh_fields,
	.get_ts_info = igc_ethtool_get_ts_info,
	.get_channels = igc_ethtool_get_channels,
	.set_channels = igc_ethtool_set_channels,
	.get_priv_flags = igc_ethtool_get_priv_flags,
	.set_priv_flags = igc_ethtool_set_priv_flags,
	.get_eee = igc_ethtool_get_eee,
	.set_eee = igc_ethtool_set_eee,
	.get_link_ksettings = igc_ethtool_get_link_ksettings,
	.set_link_ksettings = igc_ethtool_set_link_ksettings,
	.self_test = igc_ethtool_diag_test,
	.get_mm = igc_ethtool_get_mm,
	.get_mm_stats = igc_ethtool_get_mm_stats,
	.set_mm = igc_ethtool_set_mm,
};

/* Attach the igc ethtool operations to @netdev. */
void igc_ethtool_set_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &igc_ethtool_ops;
}