/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* ethtool support for i40e */

#include "i40e.h"
#include "i40e_diag.h"

struct i40e_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define I40E_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define I40E_NETDEV_STAT(_net_stat) \
		I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
		I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
		I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
		I40E_STAT(struct i40e_veb, _name, _stat)

static const struct i40e_stats i40e_gstrings_net_stats[] = {
	I40E_NETDEV_STAT(rx_packets),
	I40E_NETDEV_STAT(tx_packets),
	I40E_NETDEV_STAT(rx_bytes),
	I40E_NETDEV_STAT(tx_bytes),
	I40E_NETDEV_STAT(rx_errors),
	I40E_NETDEV_STAT(tx_errors),
	I40E_NETDEV_STAT(rx_dropped),
	I40E_NETDEV_STAT(tx_dropped),
	I40E_NETDEV_STAT(collisions),
	I40E_NETDEV_STAT(rx_length_errors),
	I40E_NETDEV_STAT(rx_crc_errors),
};

static const struct i40e_stats i40e_gstrings_veb_stats[] = {
	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
	I40E_VEB_STAT("rx_discards", stats.rx_discards),
	I40E_VEB_STAT("tx_discards", stats.tx_discards),
	I40E_VEB_STAT("tx_errors", stats.tx_errors),
	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
};

static const struct i40e_stats i40e_gstrings_misc_stats[] = {
	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
};
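
/* For reference, each entry in these tables carries just a name, a size and
 * a byte offset, so the ethtool stats code can walk them generically.  As an
 * example, I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes) expands to
 *
 *	{ .stat_string = "rx_bytes",
 *	  .sizeof_stat = FIELD_SIZEOF(struct i40e_pf, stats.eth.rx_bytes),
 *	  .stat_offset = offsetof(struct i40e_pf, stats.eth.rx_bytes) }
 *
 * and i40e_get_ethtool_stats() later reads the value from
 * (char *)pf + stat_offset, picking a u64 or u32 load based on sizeof_stat.
 */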

static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
				 struct ethtool_rxnfc *cmd);

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they are separate.  This device supports Virtualization, and
 * as such might have several netdevs supporting VMDq and FCoE going
 * through a single port.  The NETDEV_STATs are for individual netdevs
 * seen at the top of the stack, and the PF_STATs are for the physical
 * function at the bottom of the stack hosting those netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
 */
static struct i40e_stats i40e_gstrings_stats[] = {
	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	I40E_PF_STAT("crc_errors", stats.crc_errors),
	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
	I40E_PF_STAT("tx_timeout", tx_timeout_count),
	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),

	/* LPI stats */
	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};
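
/* In `ethtool -S` output on the base PF netdev these entries show up with a
 * "port." prefix (and the VEB entries above with a "veb." prefix), next to
 * the unprefixed per-netdev counters; see i40e_get_strings() below.  So the
 * "rx_bytes" entry here is reported as "port.rx_bytes".
 */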

#ifdef I40E_FCOE
static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
	I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
	I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
	I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
	I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
	I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
	I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
	I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
	I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
};

#endif /* I40E_FCOE */
#define I40E_QUEUE_STATS_LEN(n) \
	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
	    * 2 /* Tx and Rx together */ \
	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN	ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
#ifdef I40E_FCOE
#define I40E_FCOE_STATS_LEN	ARRAY_SIZE(i40e_gstrings_fcoe_stats)
#define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
				 I40E_FCOE_STATS_LEN + \
				 I40E_MISC_STATS_LEN + \
				 I40E_QUEUE_STATS_LEN((n)))
#else
#define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
				 I40E_MISC_STATS_LEN + \
				 I40E_QUEUE_STATS_LEN((n)))
#endif /* I40E_FCOE */
#define I40E_PFC_STATS_LEN ( \
		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
		 / sizeof(u64))
#define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
#define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
				 I40E_PFC_STATS_LEN + \
				 I40E_VSI_STATS_LEN((n)))

enum i40e_ethtool_test_id {
	I40E_ETH_TEST_REG = 0,
	I40E_ETH_TEST_EEPROM,
	I40E_ETH_TEST_INTR,
	I40E_ETH_TEST_LOOPBACK,
	I40E_ETH_TEST_LINK,
};

static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Eeprom test (offline)",
	"Interrupt test (offline)",
	"Loopback test (offline)",
	"Link test (on/offline)"
};

#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)

/**
 * i40e_get_settings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ecmd: ethtool command
 *
 * Reports speed/duplex settings based on media_type
 **/
static int i40e_get_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
	u32 link_speed = hw_link_info->link_speed;

	/* hardware is either in 40G mode or 10G mode
	 * NOTE: this section initializes supported and advertising
	 */
	if (!link_up) {
		/* link is down and the driver needs to fall back on
		 * device ID to determine what kinds of info to display,
		 * it's mostly a guess that may change when link is up
		 */
		switch (hw->device_id) {
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
			/* pluggable QSFP */
			ecmd->supported = SUPPORTED_40000baseSR4_Full |
					  SUPPORTED_40000baseCR4_Full |
					  SUPPORTED_40000baseLR4_Full;
			ecmd->advertising = ADVERTISED_40000baseSR4_Full |
					    ADVERTISED_40000baseCR4_Full |
					    ADVERTISED_40000baseLR4_Full;
			break;
		case I40E_DEV_ID_KX_B:
			/* backplane 40G */
			ecmd->supported = SUPPORTED_40000baseKR4_Full;
			ecmd->advertising = ADVERTISED_40000baseKR4_Full;
			break;
		case I40E_DEV_ID_KX_C:
			/* backplane 10G */
			ecmd->supported = SUPPORTED_10000baseKR_Full;
			ecmd->advertising = ADVERTISED_10000baseKR_Full;
			break;
		case I40E_DEV_ID_10G_BASE_T:
			ecmd->supported = SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_100baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full |
					    ADVERTISED_1000baseT_Full |
					    ADVERTISED_100baseT_Full;
			break;
		default:
			/* all the rest are 10G/1G */
			ecmd->supported = SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full |
					    ADVERTISED_1000baseT_Full;
			break;
		}

		/* skip phy_type use as it is zero when link is down */
		goto no_valid_phy_type;
	}

	switch (hw_link_info->phy_type) {
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_40000baseCR4_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_40000baseCR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_40000baseKR4_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_40000baseKR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_XLAUI:
		ecmd->supported = SUPPORTED_40000baseSR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ecmd->supported = SUPPORTED_40000baseLR4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseKX4_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseKX4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseKR_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseKR_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
		ecmd->supported = SUPPORTED_10000baseT_Full;
		ecmd->supported |= SUPPORTED_1000baseT_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_10GBASE_T:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseT_Full |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseT_Full |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_SFI:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		ecmd->supported = SUPPORTED_10000baseT_Full;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_1000BASE_T:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseT_Full |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseT_Full |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseT_Full |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseT_Full |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	case I40E_PHY_TYPE_SGMII:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	default:
		/* if we got here and link is up something bad is afoot */
		netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
			    hw_link_info->phy_type);
	}

no_valid_phy_type:
	/* this is if autoneg is enabled or disabled */
	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
			  AUTONEG_ENABLE : AUTONEG_DISABLE);

	switch (hw->phy.media_type) {
	case I40E_MEDIA_TYPE_BACKPLANE:
		ecmd->supported |= SUPPORTED_Autoneg |
				   SUPPORTED_Backplane;
		ecmd->advertising |= ADVERTISED_Autoneg |
				     ADVERTISED_Backplane;
		ecmd->port = PORT_NONE;
		break;
	case I40E_MEDIA_TYPE_BASET:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case I40E_MEDIA_TYPE_DA:
	case I40E_MEDIA_TYPE_CX4:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_DA;
		break;
	case I40E_MEDIA_TYPE_FIBER:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case I40E_MEDIA_TYPE_UNKNOWN:
	default:
		ecmd->port = PORT_OTHER;
		break;
	}

	ecmd->transceiver = XCVR_EXTERNAL;

	ecmd->supported |= SUPPORTED_Pause;

	switch (hw->fc.current_mode) {
	case I40E_FC_FULL:
		ecmd->advertising |= ADVERTISED_Pause;
		break;
	case I40E_FC_TX_PAUSE:
		ecmd->advertising |= ADVERTISED_Asym_Pause;
		break;
	case I40E_FC_RX_PAUSE:
		ecmd->advertising |= (ADVERTISED_Pause |
				      ADVERTISED_Asym_Pause);
		break;
	default:
		ecmd->advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);
		break;
	}

	if (link_up) {
		switch (link_speed) {
		case I40E_LINK_SPEED_40GB:
			/* need a SPEED_40000 in ethtool.h */
			ethtool_cmd_speed_set(ecmd, 40000);
			break;
		case I40E_LINK_SPEED_10GB:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case I40E_LINK_SPEED_1GB:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case I40E_LINK_SPEED_100MB:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

/**
 * i40e_set_settings - Set Speed and Duplex
 * @netdev: network interface device structure
 * @ecmd: ethtool command
 *
 * Set speed/duplex per media_types advertised/forced
 **/
static int i40e_set_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct ethtool_cmd safe_ecmd;
	i40e_status status = 0;
	bool change = false;
	int err = 0;
	u8 autoneg;
	u32 advertise;

	if (vsi != pf->vsi[pf->lan_vsi])
		return -EOPNOTSUPP;

	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
		return -EOPNOTSUPP;

	/* get our own copy of the bits to check against */
	memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
	i40e_get_settings(netdev, &safe_ecmd);

	/* save autoneg and speed out of ecmd */
	autoneg = ecmd->autoneg;
	advertise = ecmd->advertising;

	/* set autoneg and speed back to what they currently are */
	ecmd->autoneg = safe_ecmd.autoneg;
	ecmd->advertising = safe_ecmd.advertising;

	ecmd->cmd = safe_ecmd.cmd;
	/* If ecmd and safe_ecmd are not the same now, then they are
	 * trying to set something that we do not support
	 */
	if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
		return -EOPNOTSUPP;

	while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status)
		return -EAGAIN;

	/* Copy abilities to config in case autoneg is not
	 * set below
	 */
	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	config.abilities = abilities.abilities;

	/* Check autoneg */
	if (autoneg == AUTONEG_ENABLE) {
		/* If autoneg is not supported, return error */
		if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
			netdev_info(netdev, "Autoneg not supported on this phy\n");
			return -EINVAL;
		}
		/* If autoneg was not already enabled */
		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
			config.abilities = abilities.abilities |
					   I40E_AQ_PHY_ENABLE_AN;
			change = true;
		}
	} else {
		/* If autoneg is supported 10GBASE_T is the only phy that
		 * can disable it, so otherwise return error
		 */
		if (safe_ecmd.supported & SUPPORTED_Autoneg &&
		    hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
			netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
			return -EINVAL;
		}
		/* If autoneg is currently enabled */
		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
			config.abilities = abilities.abilities &
					   ~I40E_AQ_PHY_ENABLE_AN;
			change = true;
		}
	}

	if (advertise & ~safe_ecmd.supported)
		return -EINVAL;

	if (advertise & ADVERTISED_100baseT_Full)
		config.link_speed |= I40E_LINK_SPEED_100MB;
	if (advertise & ADVERTISED_1000baseT_Full ||
	    advertise & ADVERTISED_1000baseKX_Full)
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (advertise & ADVERTISED_10000baseT_Full ||
	    advertise & ADVERTISED_10000baseKX4_Full ||
	    advertise & ADVERTISED_10000baseKR_Full)
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (advertise & ADVERTISED_40000baseKR4_Full ||
	    advertise & ADVERTISED_40000baseCR4_Full ||
	    advertise & ADVERTISED_40000baseSR4_Full ||
	    advertise & ADVERTISED_40000baseLR4_Full)
		config.link_speed |= I40E_LINK_SPEED_40GB;

	if (change || (abilities.link_speed != config.link_speed)) {
		/* copy over the rest of the abilities */
		config.phy_type = abilities.phy_type;
		config.eee_capability = abilities.eee_capability;
		config.eeer = abilities.eeer_val;
		config.low_power_ctrl = abilities.d3_lpan;

		/* set link and auto negotiation so changes take effect */
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		/* If link is up put link down */
		if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
			/* Tell the OS link is going down, the link will go
			 * back up when fw says it is ready asynchronously
			 */
			netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}

		/* make the aq call */
		status = i40e_aq_set_phy_config(hw, &config, NULL);
		if (status) {
			netdev_info(netdev, "Set phy config failed with error %d.\n",
				    status);
			return -EAGAIN;
		}

		status = i40e_update_link_info(hw, true);
		if (status)
			netdev_info(netdev, "Updating link info failed with error %d\n",
				    status);

	} else {
		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
	}

	return err;
}

static int i40e_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation */
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
	i40e_status ret = 0;

	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
	if (ret) {
		netdev_info(netdev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_get_pauseparam - Get Flow Control status
 * Return tx/rx-pause status
 **/
static void i40e_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;

	pause->autoneg =
		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
		  AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* PFC enabled so report LFC as off */
	if (dcbx_cfg->pfc.pfcenable) {
		pause->rx_pause = 0;
		pause->tx_pause = 0;
		return;
	}

	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == I40E_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/**
 * i40e_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: return tx/rx flow control status
 **/
static int i40e_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
	i40e_status status;
	u8 aq_failures;
	int err = 0;

	if (vsi != pf->vsi[pf->lan_vsi])
		return -EOPNOTSUPP;

	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work*/
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (dcbx_cfg->pfc.pfcenable) {
		netdev_info(netdev,
			    "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	if (pause->rx_pause && pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_NONE;
	else
		return -EINVAL;

	/* Tell the OS link is going down, the link will go back up when fw
	 * says it is ready asynchronously
	 */
	netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Set the fc mode and only restart an if link is up*/
	status = i40e_set_fc(hw, &aq_failures, link_up);

	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
			    status, hw->aq.asq_last_status);
		err = -EAGAIN;
	}
	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
			    status, hw->aq.asq_last_status);
		err = -EAGAIN;
	}
	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
			    status, hw->aq.asq_last_status);
		err = -EAGAIN;
	}

	if (!test_bit(__I40E_DOWN, &pf->state)) {
		/* Give it a little more time to try to come back */
		msleep(75);
		if (!test_bit(__I40E_DOWN, &pf->state))
			return i40e_nway_reset(netdev);
	}

	return err;
}

static u32 i40e_get_msglevel(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	return pf->msg_enable;
}

static void i40e_set_msglevel(struct net_device *netdev, u32 data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (I40E_DEBUG_USER & data)
		pf->hw.debug_mask = data;
	pf->msg_enable = data;
}

static int i40e_get_regs_len(struct net_device *netdev)
{
	int reg_count = 0;
	int i;

	for (i = 0; i40e_reg_list[i].offset != 0; i++)
		reg_count += i40e_reg_list[i].elements;

	return reg_count * sizeof(u32);
}

static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
			  void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 *reg_buf = p;
	int i, j, ri;
	u32 reg;

	/* Tell ethtool which driver-version-specific regs output we have.
	 *
	 * At some point, if we have ethtool doing special formatting of
	 * this data, it will rely on this version number to know how to
	 * interpret things.  Hence, this needs to be updated if/when the
	 * diags register table is changed.
	 */
	regs->version = 1;

	/* loop through the diags reg table for what to print */
	ri = 0;
	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
		for (j = 0; j < i40e_reg_list[i].elements; j++) {
			reg = i40e_reg_list[i].offset
				+ (j * i40e_reg_list[i].stride);
			reg_buf[ri++] = rd32(hw, reg);
		}
	}

}

static int i40e_get_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	int ret_val = 0, len, offset;
	u8 *eeprom_buff;
	u16 i, sectors;
	bool last;
	u32 magic;

#define I40E_NVM_SECTOR_SIZE 4096
	if (eeprom->len == 0)
		return -EINVAL;

	/* check for NVMUpdate access method */
	magic = hw->vendor_id | (hw->device_id << 16);
	if (eeprom->magic && eeprom->magic != magic) {
		struct i40e_nvm_access *cmd;
		int errno;

		/* make sure it is the right magic for NVMUpdate */
		if ((eeprom->magic >> 16) != hw->device_id)
			return -EINVAL;

		cmd = (struct i40e_nvm_access *)eeprom;
		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
		if (ret_val)
			dev_info(&pf->pdev->dev,
				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
				 ret_val, hw->aq.asq_last_status, errno,
				 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
				 cmd->offset, cmd->data_size);

		return errno;
	}

	/* normal ethtool get_eeprom support */
	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_val) {
		dev_info(&pf->pdev->dev,
			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
			 ret_val, hw->aq.asq_last_status);
		goto free_buff;
	}

	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
	len = I40E_NVM_SECTOR_SIZE;
	last = false;
	for (i = 0; i < sectors; i++) {
		if (i == (sectors - 1)) {
			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
			last = true;
		}
		offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
		ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
				last, NULL);
		if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed, invalid offset 0x%x\n",
				 offset);
			break;
		} else if (ret_val &&
			   hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed, access, offset 0x%x\n",
				 offset);
			break;
		} else if (ret_val) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed offset %d err=%d status=0x%x\n",
				 offset, ret_val, hw->aq.asq_last_status);
			break;
		}
	}

	i40e_release_nvm(hw);
	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
free_buff:
	kfree(eeprom_buff);
	return ret_val;
}

static int i40e_get_eeprom_len(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	u32 val;

	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
	/* register returns value in power of 2, 64Kbyte chunks. */
	val = (64 * 1024) * (1 << val);
	return val;
}

static int i40e_set_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_nvm_access *cmd;
	int ret_val = 0;
	int errno;
	u32 magic;

	/* normal ethtool set_eeprom is not supported */
	magic = hw->vendor_id | (hw->device_id << 16);
	if (eeprom->magic == magic)
		return -EOPNOTSUPP;

	/* check for NVMUpdate access method */
	if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
		return -EINVAL;

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
		return -EBUSY;

	cmd = (struct i40e_nvm_access *)eeprom;
	ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
	if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
		dev_info(&pf->pdev->dev,
			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
			 ret_val, hw->aq.asq_last_status, errno,
			 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
			 cmd->offset, cmd->data_size);

	return errno;
}

static void i40e_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, i40e_driver_version_str,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

static void i40e_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];

	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int i40e_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
		netdev_info(netdev,
			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
			    ring->tx_pending, ring->rx_pending,
			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
		return -EINVAL;
	}

	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == vsi->tx_rings[0]->count) &&
	    (new_rx_count == vsi->rx_rings[0]->count))
		return 0;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);

	if (!netif_running(vsi->netdev)) {
		/* simple case - set for the next time the netdev is started */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			vsi->tx_rings[i]->count = new_tx_count;
			vsi->rx_rings[i]->count = new_rx_count;
		}
		goto done;
	}

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */

	/* alloc updated Tx resources */
	if (new_tx_count != vsi->tx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Tx descriptor count from %d to %d.\n",
			    vsi->tx_rings[0]->count, new_tx_count);
		tx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!tx_rings) {
			err = -ENOMEM;
			goto done;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			tx_rings[i] = *vsi->tx_rings[i];
			tx_rings[i].count = new_tx_count;
			err = i40e_setup_tx_descriptors(&tx_rings[i]);
			if (err) {
				while (i) {
					i--;
					i40e_free_tx_resources(&tx_rings[i]);
				}
				kfree(tx_rings);
				tx_rings = NULL;

				goto done;
			}
		}
	}

	/* alloc updated Rx resources */
	if (new_rx_count != vsi->rx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Rx descriptor count from %d to %d\n",
			    vsi->rx_rings[0]->count, new_rx_count);
		rx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!rx_rings) {
			err = -ENOMEM;
			goto free_tx;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			rx_rings[i] = *vsi->rx_rings[i];
			rx_rings[i].count = new_rx_count;
			err = i40e_setup_rx_descriptors(&rx_rings[i]);
			if (err) {
				while (i) {
					i--;
					i40e_free_rx_resources(&rx_rings[i]);
				}
				kfree(rx_rings);
				rx_rings = NULL;

				goto free_tx;
			}
		}
	}

	/* Bring interface down, copy in the new ring info,
	 * then restore the interface
	 */
	i40e_down(vsi);

	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_tx_resources(vsi->tx_rings[i]);
			*vsi->tx_rings[i] = tx_rings[i];
		}
		kfree(tx_rings);
		tx_rings = NULL;
	}

	if (rx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_rx_resources(vsi->rx_rings[i]);
			*vsi->rx_rings[i] = rx_rings[i];
		}
		kfree(rx_rings);
		rx_rings = NULL;
	}

	i40e_up(vsi);

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			i40e_free_tx_resources(&tx_rings[i]);
		kfree(tx_rings);
		tx_rings = NULL;
	}

done:
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);

	return err;
}

static int i40e_get_sset_count(struct net_device *netdev, int sset)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	switch (sset) {
	case ETH_SS_TEST:
		return I40E_TEST_LEN;
	case ETH_SS_STATS:
		if (vsi == pf->vsi[pf->lan_vsi]) {
			int len = I40E_PF_STATS_LEN(netdev);

			if (pf->lan_veb != I40E_NO_VEB)
				len += I40E_VEB_STATS_LEN;
			return len;
		} else {
			return I40E_VSI_STATS_LEN(netdev);
		}
	default:
		return -EOPNOTSUPP;
	}
}

static void i40e_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i = 0;
	char *p;
	int j;
	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
	unsigned int start;

	i40e_update_stats(vsi);

	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
		p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
#ifdef I40E_FCOE
	for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
		p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
#endif
	rcu_read_lock();
	for (j = 0; j < vsi->num_queue_pairs; j++) {
		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);

		if (!tx_ring)
			continue;

		/* process Tx ring statistics */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			data[i] = tx_ring->stats.packets;
			data[i + 1] = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
		i += 2;

		/* Rx ring is the 2nd half of the queue pair */
		rx_ring = &tx_ring[1];
		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			data[i] = rx_ring->stats.packets;
			data[i + 1] = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
		i += 2;
	}
	rcu_read_unlock();
	if (vsi != pf->vsi[pf->lan_vsi])
		return;

	if (pf->lan_veb != I40E_NO_VEB) {
		struct i40e_veb *veb = pf->veb[pf->lan_veb];
		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
			p = (char *)veb;
			p += i40e_gstrings_veb_stats[j].stat_offset;
			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}
	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
		p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_tx[j];
		data[i++] = pf->stats.priority_xoff_tx[j];
	}
	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_rx[j];
		data[i++] = pf->stats.priority_xoff_rx[j];
	}
	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
		data[i++] = pf->stats.priority_xon_2_xoff[j];
}

static void i40e_get_strings(struct net_device *netdev, u32 stringset,
			     u8 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < I40E_TEST_LEN; i++) {
			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 i40e_gstrings_net_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 i40e_gstrings_misc_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
#ifdef I40E_FCOE
		for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 i40e_gstrings_fcoe_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
#endif
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		if (vsi != pf->vsi[pf->lan_vsi])
			return;

		if (pf->lan_veb != I40E_NO_VEB) {
			for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
				snprintf(p, ETH_GSTRING_LEN, "veb.%s",
					i40e_gstrings_veb_stats[i].stat_string);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "port.%s",
				 i40e_gstrings_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
			snprintf(p, ETH_GSTRING_LEN,
				 "port.tx_priority_%u_xon", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN,
				 "port.tx_priority_%u_xoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
			snprintf(p, ETH_GSTRING_LEN,
				 "port.rx_priority_%u_xon", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN,
				 "port.rx_priority_%u_xoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
			snprintf(p, ETH_GSTRING_LEN,
				 "port.rx_priority_%u_xon_2_xoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static int i40e_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct i40e_pf *pf = i40e_netdev_to_pf(dev);

	/* only report HW timestamping if PTP is enabled */
	if (!(pf->flags & I40E_FLAG_PTP))
		return ethtool_op_get_ts_info(dev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (pf->ptp_clock)
		info->phc_index = ptp_clock_index(pf->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);

	return 0;
}

static int i40e_link_test(struct net_device *netdev, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	netif_info(pf, hw, netdev, "link test\n");
	if (i40e_get_link_status(&pf->hw))
		*data = 0;
	else
		*data = 1;

	return *data;
}

static int i40e_reg_test(struct net_device *netdev, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	netif_info(pf, hw, netdev, "register test\n");
	*data = i40e_diag_reg_test(&pf->hw);

	return *data;
}

static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	netif_info(pf, hw, netdev, "eeprom test\n");
	*data = i40e_diag_eeprom_test(&pf->hw);

	/* forcibly clear the NVM Update state machine */
	pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;

	return *data;
}

static int i40e_intr_test(struct net_device *netdev, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	u16 swc_old = pf->sw_int_count;

	netif_info(pf, hw, netdev, "interrupt test\n");
	wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
	     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
	      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
	usleep_range(1000, 2000);
	*data = (swc_old == pf->sw_int_count);

	return *data;
}

static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	netif_info(pf, hw, netdev, "loopback test not implemented\n");
	*data = 0;

	return *data;
}

static void i40e_diag_test(struct net_device *netdev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */
		netif_info(pf, drv, netdev, "offline testing starting\n");

		set_bit(__I40E_TESTING, &pf->state);

		/* Link test performed before hardware reset
		 * so autoneg doesn't interfere with test result
		 */
		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* run reg test last, a reset is required after it */
		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(__I40E_TESTING, &pf->state);
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
	} else {
		/* Online tests */
		netif_info(pf, drv, netdev, "online testing starting\n");

		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[I40E_ETH_TEST_REG] = 0;
		data[I40E_ETH_TEST_EEPROM] = 0;
		data[I40E_ETH_TEST_INTR] = 0;
		data[I40E_ETH_TEST_LOOPBACK] = 0;
	}

	netif_info(pf, drv, netdev, "testing finished\n");
}

static void i40e_get_wol(struct net_device *netdev,
			 struct ethtool_wolinfo *wol)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 wol_nvm_bits;

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if ((1 << hw->port) & wol_nvm_bits) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
	}
}

static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 wol_nvm_bits;

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (((1 << hw->port) & wol_nvm_bits))
		return -EOPNOTSUPP;

	/* only magic packet is supported */
	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
		return -EOPNOTSUPP;

	/* is this a new value? */
	if (pf->wol_en != !!wol->wolopts) {
		pf->wol_en = !!wol->wolopts;
		device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
	}

	return 0;
}

static int i40e_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int blink_freq = 2;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		pf->led_status = i40e_led_get(hw);
		return blink_freq;
	case ETHTOOL_ID_ON:
		i40e_led_set(hw, 0xF, false);
		break;
	case ETHTOOL_ID_OFF:
		i40e_led_set(hw, 0x0, false);
		break;
	case ETHTOOL_ID_INACTIVE:
		i40e_led_set(hw, pf->led_status, false);
		break;
	}

	return 0;
}

/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
 * Throttle Rate (ITR) ie. ITR(1) = 2us ITR(10) = 20 us, and also
 * 125us (8000 interrupts per second) == ITR(62)
 */
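
/* Worked example of the factor-of-2 conversion above (illustrative only):
 * assuming that relationship, a request of "rx-usecs 50" is stored in
 * vsi->rx_itr_setting as 50 and, via ITR_TO_REG(), programmed into the ITR
 * register as roughly 25, while the 125us (8000 ints/sec) case corresponds
 * to a register value of 62.
 */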

static int i40e_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	ec->tx_max_coalesced_frames_irq = vsi->work_limit;
	ec->rx_max_coalesced_frames_irq = vsi->work_limit;

	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
		ec->use_adaptive_rx_coalesce = 1;

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
		ec->use_adaptive_tx_coalesce = 1;

	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;

	return 0;
}

static int i40e_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_q_vector *q_vector;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i;

	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
		vsi->work_limit = ec->tx_max_coalesced_frames_irq;

	vector = vsi->base_vector;
	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
	} else if (ec->rx_coalesce_usecs == 0) {
		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
		if (ec->use_adaptive_rx_coalesce)
			netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
	} else {
		netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
		return -EINVAL;
	}

	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
	} else if (ec->tx_coalesce_usecs == 0) {
		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
		if (ec->use_adaptive_tx_coalesce)
			netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
	} else {
		netif_info(pf, drv, netdev,
			   "Invalid value, tx-usecs range is 0-8160\n");
		return -EINVAL;
	}

	if (ec->use_adaptive_rx_coalesce)
		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;

	if (ec->use_adaptive_tx_coalesce)
		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;

	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
		i40e_flush(hw);
	}

	return 0;
}
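
/* Example of how the coalesce handlers above are typically exercised from
 * user space (standard ethtool syntax, shown only for illustration; "eth0"
 * is a placeholder name):
 *
 *	ethtool -C eth0 adaptive-rx off rx-usecs 62
 *
 * which clears I40E_ITR_DYNAMIC for Rx and stores 62 in vsi->rx_itr_setting,
 * and i40e_set_coalesce() then writes the converted value to every queue
 * vector's Rx ITR register.
 */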

/**
 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
 * @pf: pointer to the physical function struct
 * @cmd: ethtool rxnfc command
 *
 * Returns Success if the flow is supported, else Invalid Input.
 **/
static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on i40e */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through to add IP fields */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through to add IP fields */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
 * @pf: Pointer to the physical function struct
 * @cmd: The command to get or set Rx flow classification rules
 * @rule_locs: Array of used rule locations
 *
 * This function populates both the total and actual rule count of
 * the ethtool flow classification command
 *
 * Returns 0 on success or -EMSGSIZE if entry not found
 **/
static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
				     struct ethtool_rxnfc *cmd,
				     u32 *rule_locs)
{
	struct i40e_fdir_filter *rule;
	struct hlist_node *node2;
	int cnt = 0;

	/* report total rule count */
	cmd->data = i40e_get_fd_cnt_all(pf);

	hlist_for_each_entry_safe(rule, node2,
				  &pf->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;

		rule_locs[cnt] = rule->fd_id;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

/**
 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
 * @pf: Pointer to the physical function struct
 * @cmd: The command to get or set Rx flow classification rules
 *
 * This function looks up a filter based on the Rx flow classification
 * command and fills the flow spec info for it if found
 *
 * Returns 0 on success or -EINVAL if filter not found
 **/
static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
				       struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
			(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct i40e_fdir_filter *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2,
				  &pf->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->fd_id)
			break;
	}

	if (!rule || fsp->location != rule->fd_id)
		return -EINVAL;

	fsp->flow_type = rule->flow_type;
	if (fsp->flow_type == IP_USER_FLOW) {
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
	}

	/* Reverse the src and dest notion, since the HW views them from
	 * Tx perspective whereas the user expects it from Rx filter view.
	 */
/**
 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
 * @pf: Pointer to the physical function struct
 * @cmd: The command to get or set Rx flow classification rules
 *
 * This function looks up a filter based on the Rx flow classification
 * command and fills the flow spec info for it if found
 *
 * Returns 0 on success or -EINVAL if filter not found
 **/
static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
				       struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
			(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct i40e_fdir_filter *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2,
				  &pf->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->fd_id)
			break;
	}

	if (!rule || fsp->location != rule->fd_id)
		return -EINVAL;

	fsp->flow_type = rule->flow_type;
	if (fsp->flow_type == IP_USER_FLOW) {
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
	}

	/* Reverse the src and dest notion, since the HW views them from
	 * Tx perspective whereas the user expects it from Rx filter view.
	 */
	fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];

	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->q_index;

	return 0;
}

/**
 * i40e_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: array in which to record the used rule locations
 *
 * Returns 0 on success, -EOPNOTSUPP if the command is not supported.
 **/
static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vsi->alloc_queue_pairs;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = i40e_get_rss_hash_opts(pf, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = pf->fdir_pf_active_filters;
		/* report total rule count */
		cmd->data = i40e_get_fd_cnt_all(pf);
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = i40e_get_ethtool_fdir_entry(pf, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
		break;
	default:
		break;
	}

	return ret;
}
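/* Illustrative usage (not part of the driver): the RSS input set for a
 * flow type can be changed from user space, for example
 *
 *	ethtool -N <iface> rx-flow-hash tcp4 sdfn
 *
 * which requests hashing on src/dst IP and src/dst L4 port. The request
 * arrives as ETHTOOL_SRXFH and is translated by i40e_set_rss_hash_opt()
 * below into the 64-bit hash-enable (HENA) mask, written back to the
 * hardware as the two 32-bit halves of I40E_PFQF_HENA.
 */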
/**
 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @pf: pointer to the physical function struct
 * @nfc: ethtool rxnfc command
 *
 * Returns 0 if the flow input set is supported, -EINVAL otherwise.
 **/
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &pf->hw;
	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
		break;
	case IPV6_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
		break;
	default:
		return -EINVAL;
	}

	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}
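/* When a new filter re-uses an occupied rule location,
 * i40e_update_ethtool_fdir_entry() below only removes the old filter
 * from the hardware if its input set (addresses and ports) differs from
 * the new one; if the input sets match (e.g. the same 4-tuple is simply
 * redirected to a different queue), programming the new filter is enough
 * to replace the old entry.
 */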
/**
 * i40e_match_fdir_input_set - Match a new filter against an existing one
 * @rule: The filter already added
 * @input: The new filter to compare against
 *
 * Returns true if the two input sets match
 **/
static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
				      struct i40e_fdir_filter *input)
{
	if ((rule->dst_ip[0] != input->dst_ip[0]) ||
	    (rule->src_ip[0] != input->src_ip[0]) ||
	    (rule->dst_port != input->dst_port) ||
	    (rule->src_port != input->src_port))
		return false;
	return true;
}

/**
 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
 * @vsi: Pointer to the targeted VSI
 * @input: The filter to update or NULL to indicate deletion
 * @sw_idx: Software index to the filter
 * @cmd: The command to get or set Rx flow classification rules
 *
 * This function updates (or deletes) a Flow Director entry from
 * the hlist of the corresponding PF
 *
 * Returns 0 on success
 **/
static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
					  struct i40e_fdir_filter *input,
					  u16 sw_idx,
					  struct ethtool_rxnfc *cmd)
{
	struct i40e_fdir_filter *rule, *parent;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node2;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &pf->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->fd_id >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->fd_id == sw_idx)) {
		if (input && !i40e_match_fdir_input_set(rule, input))
			err = i40e_add_del_fdir(vsi, rule, false);
		else if (!input)
			err = i40e_add_del_fdir(vsi, rule, false);
		hlist_del(&rule->fdir_node);
		kfree(rule);
		pf->fdir_pf_active_filters--;
	}

	/* If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &pf->fdir_filter_list);

	/* update counts */
	pf->fdir_pf_active_filters++;

	return 0;
}

/**
 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
 * @vsi: Pointer to the targeted VSI
 * @cmd: The command to get or set Rx flow classification rules
 *
 * The function removes a Flow Director filter entry from the
 * hlist of the corresponding PF
 *
 * Returns 0 on success
 **/
static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
			       struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct i40e_pf *pf = vsi->back;
	int ret = 0;

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
		return -EBUSY;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return -EBUSY;

	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);

	i40e_fdir_check_and_reenable(pf);
	return ret;
}
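/* Illustrative usage (not part of the driver): Flow Director (ntuple)
 * rules are managed from user space, for example
 *
 *	ethtool -N <iface> flow-type tcp4 src-ip 192.168.1.10 \
 *		dst-port 80 action 2 loc 5
 *	ethtool -N <iface> delete 5
 *
 * "action -1" requests a drop (RX_CLS_FLOW_DISC); any other action is
 * treated as the RX queue index. The add path is ETHTOOL_SRXCLSRLINS,
 * handled by i40e_add_fdir_ethtool() below, and the delete path is
 * ETHTOOL_SRXCLSRLDEL, handled by i40e_del_fdir_entry() above.
 */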
/**
 * i40e_add_fdir_ethtool - Add Flow Director filters for a flow spec
 * @vsi: pointer to the targeted VSI
 * @cmd: command to get or set RX flow classification rules
 *
 * Add Flow Director filters for a specific flow spec based on their
 * protocol. Returns 0 if the filters were successfully added.
 **/
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct i40e_fdir_filter *input;
	struct i40e_pf *pf;
	int ret = -EINVAL;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return -EOPNOTSUPP;

	if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
		return -ENOSPC;

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
		return -EBUSY;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return -EBUSY;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
			      pf->hw.func_caps.fd_filters_guaranteed)) {
		return -EINVAL;
	}

	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= vsi->num_queue_pairs))
		return -EINVAL;

	input = kzalloc(sizeof(*input), GFP_KERNEL);

	if (!input)
		return -ENOMEM;

	input->fd_id = fsp->location;

	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else
		input->dest_ctl =
			I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;

	input->q_index = fsp->ring_cookie;
	input->flex_off = 0;
	input->pctype = 0;
	input->dest_vsi = vsi->id;
	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = pf->fd_sb_cnt_idx;
	input->flow_type = fsp->flow_type;
	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;

	/* Reverse the src and dest notion, since the HW expects them to be
	 * from Tx perspective whereas the input from user is from Rx filter
	 * view.
	 */
	input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
	input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;

	ret = i40e_add_del_fdir(vsi, input, true);
	if (ret)
		kfree(input);
	else
		i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);

	return ret;
}

/**
 * i40e_set_rxnfc - command to set RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Returns 0 on success, -EOPNOTSUPP if the command is not supported.
 **/
static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = i40e_set_rss_hash_opt(pf, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = i40e_add_fdir_ethtool(vsi, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = i40e_del_fdir_entry(vsi, cmd);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * i40e_max_channels - get Max number of combined channels supported
 * @vsi: vsi pointer
 **/
static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
{
	/* TODO: This code assumes DCB and FD are disabled for now. */
	return vsi->alloc_queue_pairs;
}
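/* Illustrative usage (not part of the driver): the channel (queue pair)
 * configuration is exposed through ethtool, for example
 *
 *	ethtool -l <iface>		(show current and max channel counts)
 *	ethtool -L <iface> combined 8	(request 8 combined channels)
 *
 * Only "combined" channels are supported; the "other" count reports the
 * Flow Director sideband vector when it is enabled.
 */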
/**
 * i40e_get_channels - Get the current channels enabled and max supported etc.
 * @dev: network interface device structure
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void i40e_get_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	/* report maximum channels */
	ch->max_combined = i40e_max_channels(vsi);

	/* report info for other vector */
	ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
	ch->max_other = ch->other_count;

	/* Note: This code assumes DCB is disabled for now. */
	ch->combined_count = vsi->num_queue_pairs;
}
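/* i40e_set_channels() below validates the request (combined channels
 * only, an unchanged "other" count, and a count within the hardware and
 * RSS limits) before handing the new queue count to
 * i40e_reconfig_rss_queues() to rebuild the queue mapping.
 */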
/**
 * i40e_set_channels - Set the new channels count.
 * @dev: network interface device structure
 * @ch: ethtool channels structure
 *
 * The new channels count may not be the same as requested by the user
 * since it gets rounded down to a power of 2 value.
 **/
static int i40e_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int new_count;

	/* We do not support setting channels for any other VSI at present */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > i40e_max_channels(vsi))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	/* TODO: Flow director limit, DCB etc */

	/* cap RSS limit */
	if (count > pf->rss_size_max)
		count = pf->rss_size_max;

	/* use rss_reconfig to rebuild with new queue count and update traffic
	 * class queue mapping
	 */
	new_count = i40e_reconfig_rss_queues(pf, count);
	if (new_count > 0)
		return 0;
	else
		return -EINVAL;
}

static const struct ethtool_ops i40e_ethtool_ops = {
	.get_settings = i40e_get_settings,
	.set_settings = i40e_set_settings,
	.get_drvinfo = i40e_get_drvinfo,
	.get_regs_len = i40e_get_regs_len,
	.get_regs = i40e_get_regs,
	.nway_reset = i40e_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_wol = i40e_get_wol,
	.set_wol = i40e_set_wol,
	.set_eeprom = i40e_set_eeprom,
	.get_eeprom_len = i40e_get_eeprom_len,
	.get_eeprom = i40e_get_eeprom,
	.get_ringparam = i40e_get_ringparam,
	.set_ringparam = i40e_set_ringparam,
	.get_pauseparam = i40e_get_pauseparam,
	.set_pauseparam = i40e_set_pauseparam,
	.get_msglevel = i40e_get_msglevel,
	.set_msglevel = i40e_set_msglevel,
	.get_rxnfc = i40e_get_rxnfc,
	.set_rxnfc = i40e_set_rxnfc,
	.self_test = i40e_diag_test,
	.get_strings = i40e_get_strings,
	.set_phys_id = i40e_set_phys_id,
	.get_sset_count = i40e_get_sset_count,
	.get_ethtool_stats = i40e_get_ethtool_stats,
	.get_coalesce = i40e_get_coalesce,
	.set_coalesce = i40e_set_coalesce,
	.get_channels = i40e_get_channels,
	.set_channels = i40e_set_channels,
	.get_ts_info = i40e_get_ts_info,
};

void i40e_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &i40e_ethtool_ops;
}
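/* i40e_set_ethtool_ops() is the only entry point exported by this file;
 * it is expected to be called once per netdev from the driver's netdev
 * setup path so that the ethtool core can reach the handlers above.
 */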