1 /****************************************************************************** 2 3 Copyright (c) 2001-2012, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);

/**
 * ixgbe_init_mac_link_ops_82599 - Init MAC link function pointers
 * @hw: pointer to hardware structure
 *
 * Selects the laser-control and setup_link function pointers based on the
 * detected media type and PHY capabilities.  Called again after SFP
 * identification since the media type can change at runtime.
 **/
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/* enable the laser control functions for SFP+ fiber */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
		mac->ops.disable_tx_laser =
			&ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
			&ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
	} else {
		/* Non-fiber media has no laser to control */
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
	} else {
		/*
		 * SmartSpeed is only used on backplane links when enabled
		 * (or in auto mode) and the LESM firmware agent is not
		 * managing the link.
		 */
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		    !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because
the PHY/SFP type was 92 * not known. Perform the SFP init if necessary. 93 * 94 **/ 95 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) 96 { 97 struct ixgbe_mac_info *mac = &hw->mac; 98 struct ixgbe_phy_info *phy = &hw->phy; 99 s32 ret_val = IXGBE_SUCCESS; 100 101 DEBUGFUNC("ixgbe_init_phy_ops_82599"); 102 103 /* Identify the PHY or SFP module */ 104 ret_val = phy->ops.identify(hw); 105 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) 106 goto init_phy_ops_out; 107 108 /* Setup function pointers based on detected SFP module and speeds */ 109 ixgbe_init_mac_link_ops_82599(hw); 110 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) 111 hw->phy.ops.reset = NULL; 112 113 /* If copper media, overwrite with copper function pointers */ 114 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 115 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 116 mac->ops.get_link_capabilities = 117 &ixgbe_get_copper_link_capabilities_generic; 118 } 119 120 /* Set necessary function pointers based on phy type */ 121 switch (hw->phy.type) { 122 case ixgbe_phy_tn: 123 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 124 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 125 phy->ops.get_firmware_version = 126 &ixgbe_get_phy_firmware_version_tnx; 127 break; 128 default: 129 break; 130 } 131 init_phy_ops_out: 132 return ret_val; 133 } 134 135 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 136 { 137 s32 ret_val = IXGBE_SUCCESS; 138 u32 reg_anlp1 = 0; 139 u32 i = 0; 140 u16 list_offset, data_offset, data_value; 141 142 DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); 143 144 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 145 ixgbe_init_mac_link_ops_82599(hw); 146 147 hw->phy.ops.reset = NULL; 148 149 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 150 &data_offset); 151 if (ret_val != IXGBE_SUCCESS) 152 goto setup_sfp_out; 153 154 /* PHY config will finish before releasing the semaphore */ 155 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 156 IXGBE_GSSR_MAC_CSR_SM); 
157 if (ret_val != IXGBE_SUCCESS) { 158 ret_val = IXGBE_ERR_SWFW_SYNC; 159 goto setup_sfp_out; 160 } 161 162 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 163 while (data_value != 0xffff) { 164 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); 165 IXGBE_WRITE_FLUSH(hw); 166 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 167 } 168 169 /* Release the semaphore */ 170 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 171 /* Delay obtaining semaphore again to allow FW access */ 172 msec_delay(hw->eeprom.semaphore_delay); 173 174 /* Now restart DSP by setting Restart_AN and clearing LMS */ 175 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, 176 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | 177 IXGBE_AUTOC_AN_RESTART)); 178 179 /* Wait for AN to leave state 0 */ 180 for (i = 0; i < 10; i++) { 181 msec_delay(4); 182 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); 183 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) 184 break; 185 } 186 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { 187 DEBUGOUT("sfp module setup not complete\n"); 188 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 189 goto setup_sfp_out; 190 } 191 192 /* Restart DSP by setting Restart_AN and return to SFI mode */ 193 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, 194 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | 195 IXGBE_AUTOC_AN_RESTART)); 196 } 197 198 setup_sfp_out: 199 return ret_val; 200 } 201 202 /** 203 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type 204 * @hw: pointer to hardware structure 205 * 206 * Initialize the function pointers and assign the MAC type for 82599. 207 * Does not touch the hardware. 
 **/
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/*
	 * NOTE(review): the return value of ixgbe_init_phy_ops_generic() is
	 * immediately overwritten and never checked — confirm whether a PHY
	 * init failure should be propagated to the caller.
	 */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
	/* Re-run link op selection now that media-type ops are in place */
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware capacities (table sizes, queue counts) */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is valid only when FW mode bits are set in FWSM */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = &ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;


	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @negotiation: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *negotiation)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");


	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	/* Map each link-mode-select (LMS) setting to speeds + autoneg flag */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		/* Speeds depend on which KX4/KX/KR lanes AUTOC advertises */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*negotiation = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;
	}

	/* Multispeed fiber always supports both 10G and 1G via autotry */
	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 395 { 396 enum ixgbe_media_type media_type; 397 398 DEBUGFUNC("ixgbe_get_media_type_82599"); 399 400 /* Detect if there is a copper PHY attached. */ 401 switch (hw->phy.type) { 402 case ixgbe_phy_cu_unknown: 403 case ixgbe_phy_tn: 404 media_type = ixgbe_media_type_copper; 405 goto out; 406 default: 407 break; 408 } 409 410 switch (hw->device_id) { 411 case IXGBE_DEV_ID_82599_KX4: 412 case IXGBE_DEV_ID_82599_KX4_MEZZ: 413 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 414 case IXGBE_DEV_ID_82599_KR: 415 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: 416 case IXGBE_DEV_ID_82599_XAUI_LOM: 417 /* Default device ID is mezzanine card KX/KX4 */ 418 media_type = ixgbe_media_type_backplane; 419 break; 420 case IXGBE_DEV_ID_82599_SFP: 421 case IXGBE_DEV_ID_82599_SFP_FCOE: 422 case IXGBE_DEV_ID_82599_SFP_EM: 423 case IXGBE_DEV_ID_82599EN_SFP: 424 media_type = ixgbe_media_type_fiber; 425 break; 426 case IXGBE_DEV_ID_82599_CX4: 427 media_type = ixgbe_media_type_cx4; 428 break; 429 case IXGBE_DEV_ID_82599_T3_LOM: 430 media_type = ixgbe_media_type_copper; 431 break; 432 default: 433 media_type = ixgbe_media_type_unknown; 434 break; 435 } 436 out: 437 return media_type; 438 } 439 440 /** 441 * ixgbe_start_mac_link_82599 - Setup MAC link settings 442 * @hw: pointer to hardware structure 443 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 444 * 445 * Configures link settings based on values in the ixgbe_hw struct. 446 * Restarts the link. Performs autonegotiation if needed. 
447 **/ 448 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 449 bool autoneg_wait_to_complete) 450 { 451 u32 autoc_reg; 452 u32 links_reg; 453 u32 i; 454 s32 status = IXGBE_SUCCESS; 455 456 DEBUGFUNC("ixgbe_start_mac_link_82599"); 457 458 459 /* Restart link */ 460 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 461 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 462 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 463 464 /* Only poll for autoneg to complete if specified to do so */ 465 if (autoneg_wait_to_complete) { 466 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 467 IXGBE_AUTOC_LMS_KX4_KX_KR || 468 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 469 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 470 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 471 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 472 links_reg = 0; /* Just in case Autoneg time = 0 */ 473 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 474 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 475 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 476 break; 477 msec_delay(100); 478 } 479 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 480 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 481 DEBUGOUT("Autoneg did not complete.\n"); 482 } 483 } 484 } 485 486 /* Add delay to filter out noises during initial link setup */ 487 msec_delay(50); 488 489 return status; 490 } 491 492 /** 493 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 494 * @hw: pointer to hardware structure 495 * 496 * The base drivers may require better control over SFP+ module 497 * PHY states. This includes selectively shutting down the Tx 498 * laser on the PHY, effectively halting physical link. 
499 **/ 500 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 501 { 502 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 503 504 /* Disable tx laser; allow 100us to go dark per spec */ 505 esdp_reg |= IXGBE_ESDP_SDP3; 506 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 507 IXGBE_WRITE_FLUSH(hw); 508 usec_delay(100); 509 } 510 511 /** 512 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser 513 * @hw: pointer to hardware structure 514 * 515 * The base drivers may require better control over SFP+ module 516 * PHY states. This includes selectively turning on the Tx 517 * laser on the PHY, effectively starting physical link. 518 **/ 519 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 520 { 521 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 522 523 /* Enable tx laser; allow 100ms to light up */ 524 esdp_reg &= ~IXGBE_ESDP_SDP3; 525 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 526 IXGBE_WRITE_FLUSH(hw); 527 msec_delay(100); 528 } 529 530 /** 531 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 532 * @hw: pointer to hardware structure 533 * 534 * When the driver changes the link speeds that it can support, 535 * it sets autotry_restart to TRUE to indicate that we need to 536 * initiate a new autotry session with the link partner. To do 537 * so, we set the speed then disable and re-enable the tx laser, to 538 * alert the link partner that it also needs to restart autotry on its 539 * end. This is consistent with TRUE clause 37 autoneg, which also 540 * involves a loss of signal. 
541 **/ 542 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 543 { 544 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); 545 546 if (hw->mac.autotry_restart) { 547 ixgbe_disable_tx_laser_multispeed_fiber(hw); 548 ixgbe_enable_tx_laser_multispeed_fiber(hw); 549 hw->mac.autotry_restart = FALSE; 550 } 551 } 552 553 /** 554 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 555 * @hw: pointer to hardware structure 556 * @speed: new link speed 557 * @autoneg: TRUE if autonegotiation enabled 558 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 559 * 560 * Set the link speed in the AUTOC register and restarts link. 561 **/ 562 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 563 ixgbe_link_speed speed, bool autoneg, 564 bool autoneg_wait_to_complete) 565 { 566 s32 status = IXGBE_SUCCESS; 567 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 568 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 569 u32 speedcnt = 0; 570 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 571 u32 i = 0; 572 bool link_up = FALSE; 573 bool negotiation; 574 575 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); 576 577 /* Mask off requested but non-supported speeds */ 578 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); 579 if (status != IXGBE_SUCCESS) 580 return status; 581 582 speed &= link_speed; 583 584 /* 585 * Try each speed one by one, highest priority first. We do this in 586 * software because 10gb fiber doesn't support speed autonegotiation. 
587 */ 588 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { 589 speedcnt++; 590 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 591 592 /* If we already have link at this speed, just jump out */ 593 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); 594 if (status != IXGBE_SUCCESS) 595 return status; 596 597 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) 598 goto out; 599 600 /* Set the module link speed */ 601 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); 602 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 603 IXGBE_WRITE_FLUSH(hw); 604 605 /* Allow module to change analog characteristics (1G->10G) */ 606 msec_delay(40); 607 608 status = ixgbe_setup_mac_link_82599(hw, 609 IXGBE_LINK_SPEED_10GB_FULL, 610 autoneg, 611 autoneg_wait_to_complete); 612 if (status != IXGBE_SUCCESS) 613 return status; 614 615 /* Flap the tx laser if it has not already been done */ 616 ixgbe_flap_tx_laser(hw); 617 618 /* 619 * Wait for the controller to acquire link. Per IEEE 802.3ap, 620 * Section 73.10.2, we may have to wait up to 500ms if KR is 621 * attempted. 82599 uses the same timing for 10g SFI. 
622 */ 623 for (i = 0; i < 5; i++) { 624 /* Wait for the link partner to also set speed */ 625 msec_delay(100); 626 627 /* If we have link, just jump out */ 628 status = ixgbe_check_link(hw, &link_speed, 629 &link_up, FALSE); 630 if (status != IXGBE_SUCCESS) 631 return status; 632 633 if (link_up) 634 goto out; 635 } 636 } 637 638 if (speed & IXGBE_LINK_SPEED_1GB_FULL) { 639 speedcnt++; 640 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) 641 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 642 643 /* If we already have link at this speed, just jump out */ 644 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); 645 if (status != IXGBE_SUCCESS) 646 return status; 647 648 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) 649 goto out; 650 651 /* Set the module link speed */ 652 esdp_reg &= ~IXGBE_ESDP_SDP5; 653 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 654 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 655 IXGBE_WRITE_FLUSH(hw); 656 657 /* Allow module to change analog characteristics (10G->1G) */ 658 msec_delay(40); 659 660 status = ixgbe_setup_mac_link_82599(hw, 661 IXGBE_LINK_SPEED_1GB_FULL, 662 autoneg, 663 autoneg_wait_to_complete); 664 if (status != IXGBE_SUCCESS) 665 return status; 666 667 /* Flap the tx laser if it has not already been done */ 668 ixgbe_flap_tx_laser(hw); 669 670 /* Wait for the link partner to also set speed */ 671 msec_delay(100); 672 673 /* If we have link, just jump out */ 674 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); 675 if (status != IXGBE_SUCCESS) 676 return status; 677 678 if (link_up) 679 goto out; 680 } 681 682 /* 683 * We didn't get link. Configure back to the highest speed we tried, 684 * (if there was more than one). We call ourselves back with just the 685 * single highest speed that the user requested. 
686 */ 687 if (speedcnt > 1) 688 status = ixgbe_setup_mac_link_multispeed_fiber(hw, 689 highest_link_speed, autoneg, autoneg_wait_to_complete); 690 691 out: 692 /* Set autoneg_advertised value based on input link speed */ 693 hw->phy.autoneg_advertised = 0; 694 695 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 696 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 697 698 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 699 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 700 701 return status; 702 } 703 704 /** 705 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed 706 * @hw: pointer to hardware structure 707 * @speed: new link speed 708 * @autoneg: TRUE if autonegotiation enabled 709 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 710 * 711 * Implements the Intel SmartSpeed algorithm. 712 **/ 713 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 714 ixgbe_link_speed speed, bool autoneg, 715 bool autoneg_wait_to_complete) 716 { 717 s32 status = IXGBE_SUCCESS; 718 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 719 s32 i, j; 720 bool link_up = FALSE; 721 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 722 723 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); 724 725 /* Set autoneg_advertised value based on input link speed */ 726 hw->phy.autoneg_advertised = 0; 727 728 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 729 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 730 731 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 732 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 733 734 if (speed & IXGBE_LINK_SPEED_100_FULL) 735 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 736 737 /* 738 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the 739 * autoneg advertisement if link is unable to be established at the 740 * highest negotiated rate. This can sometimes happen due to integrity 741 * issues with the physical media connection. 
742 */ 743 744 /* First, try to get link with full advertisement */ 745 hw->phy.smart_speed_active = FALSE; 746 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { 747 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 748 autoneg_wait_to_complete); 749 if (status != IXGBE_SUCCESS) 750 goto out; 751 752 /* 753 * Wait for the controller to acquire link. Per IEEE 802.3ap, 754 * Section 73.10.2, we may have to wait up to 500ms if KR is 755 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per 756 * Table 9 in the AN MAS. 757 */ 758 for (i = 0; i < 5; i++) { 759 msec_delay(100); 760 761 /* If we have link, just jump out */ 762 status = ixgbe_check_link(hw, &link_speed, &link_up, 763 FALSE); 764 if (status != IXGBE_SUCCESS) 765 goto out; 766 767 if (link_up) 768 goto out; 769 } 770 } 771 772 /* 773 * We didn't get link. If we advertised KR plus one of KX4/KX 774 * (or BX4/BX), then disable KR and try again. 775 */ 776 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || 777 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) 778 goto out; 779 780 /* Turn SmartSpeed on to disable KR support */ 781 hw->phy.smart_speed_active = TRUE; 782 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 783 autoneg_wait_to_complete); 784 if (status != IXGBE_SUCCESS) 785 goto out; 786 787 /* 788 * Wait for the controller to acquire link. 600ms will allow for 789 * the AN link_fail_inhibit_timer as well for multiple cycles of 790 * parallel detect, both 10g and 1g. This allows for the maximum 791 * connect attempts as defined in the AN MAS table 73-7. 792 */ 793 for (i = 0; i < 6; i++) { 794 msec_delay(100); 795 796 /* If we have link, just jump out */ 797 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); 798 if (status != IXGBE_SUCCESS) 799 goto out; 800 801 if (link_up) 802 goto out; 803 } 804 805 /* We didn't get link. Turn SmartSpeed back off. 
*/ 806 hw->phy.smart_speed_active = FALSE; 807 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 808 autoneg_wait_to_complete); 809 810 out: 811 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 812 DEBUGOUT("Smartspeed has downgraded the link speed " 813 "from the maximum advertised\n"); 814 return status; 815 } 816 817 /** 818 * ixgbe_setup_mac_link_82599 - Set MAC link speed 819 * @hw: pointer to hardware structure 820 * @speed: new link speed 821 * @autoneg: TRUE if autonegotiation enabled 822 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 823 * 824 * Set the link speed in the AUTOC register and restarts link. 825 **/ 826 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 827 ixgbe_link_speed speed, bool autoneg, 828 bool autoneg_wait_to_complete) 829 { 830 s32 status = IXGBE_SUCCESS; 831 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 832 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 833 u32 start_autoc = autoc; 834 u32 orig_autoc = 0; 835 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 836 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 837 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 838 u32 links_reg; 839 u32 i; 840 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 841 842 DEBUGFUNC("ixgbe_setup_mac_link_82599"); 843 844 /* Check to see if speed passed in is supported. 
*/ 845 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); 846 if (status != IXGBE_SUCCESS) 847 goto out; 848 849 speed &= link_capabilities; 850 851 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 852 status = IXGBE_ERR_LINK_SETUP; 853 goto out; 854 } 855 856 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 857 if (hw->mac.orig_link_settings_stored) 858 orig_autoc = hw->mac.orig_autoc; 859 else 860 orig_autoc = autoc; 861 862 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 863 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 864 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 865 /* Set KX4/KX/KR support according to speed requested */ 866 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 867 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 868 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) 869 autoc |= IXGBE_AUTOC_KX4_SUPP; 870 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && 871 (hw->phy.smart_speed_active == FALSE)) 872 autoc |= IXGBE_AUTOC_KR_SUPP; 873 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 874 autoc |= IXGBE_AUTOC_KX_SUPP; 875 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 876 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 877 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 878 /* Switch from 1G SFI to 10G SFI if requested */ 879 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 880 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { 881 autoc &= ~IXGBE_AUTOC_LMS_MASK; 882 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 883 } 884 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 885 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 886 /* Switch from 10G SFI to 1G SFI if requested */ 887 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 888 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { 889 autoc &= ~IXGBE_AUTOC_LMS_MASK; 890 if (autoneg) 891 autoc |= IXGBE_AUTOC_LMS_1G_AN; 892 else 893 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 894 } 895 } 896 897 if (autoc != start_autoc) { 898 /* Restart link */ 899 autoc |= IXGBE_AUTOC_AN_RESTART; 900 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, 
autoc); 901 902 /* Only poll for autoneg to complete if specified to do so */ 903 if (autoneg_wait_to_complete) { 904 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 905 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 906 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 907 links_reg = 0; /*Just in case Autoneg time=0*/ 908 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 909 links_reg = 910 IXGBE_READ_REG(hw, IXGBE_LINKS); 911 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 912 break; 913 msec_delay(100); 914 } 915 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 916 status = 917 IXGBE_ERR_AUTONEG_NOT_COMPLETE; 918 DEBUGOUT("Autoneg did not complete.\n"); 919 } 920 } 921 } 922 923 /* Add delay to filter out noises during initial link setup */ 924 msec_delay(50); 925 } 926 927 out: 928 return status; 929 } 930 931 /** 932 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field 933 * @hw: pointer to hardware structure 934 * @speed: new link speed 935 * @autoneg: TRUE if autonegotiation enabled 936 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete 937 * 938 * Restarts link on PHY and MAC based on settings passed in. 939 **/ 940 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 941 ixgbe_link_speed speed, 942 bool autoneg, 943 bool autoneg_wait_to_complete) 944 { 945 s32 status; 946 947 DEBUGFUNC("ixgbe_setup_copper_link_82599"); 948 949 /* Setup the PHY according to input speed */ 950 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 951 autoneg_wait_to_complete); 952 /* Set up MAC */ 953 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); 954 955 return status; 956 } 957 958 /** 959 * ixgbe_reset_hw_82599 - Perform hardware reset 960 * @hw: pointer to hardware structure 961 * 962 * Resets the hardware by resetting the transmit and receive units, masks 963 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 964 * reset. 
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
					IXGBE_AUTOC_AN_RESTART));

		/* Only the upper AUTOC2 bits are restored after reset */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_FDIR_REINIT_FAILED if a previous
 * FDIR command is still pending or the init-done poll times out.
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	/* re-written below with INIT_DONE cleared so HW re-runs init */
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		      IXGBE_FDIRCMD_CMD_MASK))
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
			 "aborting table re-initialization.\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 *
 * Programs the hash keys and FDIRCTRL, then polls for init completion.
 * A poll timeout is only logged; no error is returned.
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *	10G: PBALLOC = 11b, timing is 60us
	 *	 1G: PBALLOC = 11b, timing is 600us
	 *    100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *	Move the flexible bytes to use the ethertype - shift 6 words
	 *	Set the maximum length per hash bucket to 0xA filters
	 *	Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *	Turn perfect match filtering on
	 *	Report hash in RSS field of Rx wb descriptor
	 *	Initialize the drop queue
	 *	Move the flexible bytes to use the ethertype
 - shift 6 words
	 *	Set the maximum length per hash bucket to 0xA filters
	 *	Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15.
 *
 * The macros operate on local variables (lo_hash_dword, hi_hash_dword,
 * common_hash, bucket_hash, sig_hash) of the enclosing function.
 * NOTE(review): the stray ';' after "while (0)" makes callers' trailing
 * semicolons empty statements — harmless here, but nonstandard.
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0);

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: unique input dword (flow/VM pool/VLAN bits)
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 *
 * Returns the 16-bit signature hash in the upper half combined with the
 * 15-bit bucket hash in the lower half of the result.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_CONFIG on an unsupported flow type.
 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue)
{
	u64 fdirhashcmd;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 */
	switch (input.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return IXGBE_SUCCESS;
}

/* Bucket-hash-only variant of the iteration macro above; used by the
 * perfect-filter hash which does not need the signature hash. */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0);

/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on; masked in place
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;

	/* Apply masks to input data (mutates *input in place) */
	input->dword_stream[0] &= input_mask->dword_stream[0];
	input->dword_stream[1] &= input_mask->dword_stream[1];
	input->dword_stream[2] &= input_mask->dword_stream[2];
	input->dword_stream[3] &= input_mask->dword_stream[3];
	input->dword_stream[4] &= input_mask->dword_stream[4];
	input->dword_stream[5] &= input_mask->dword_stream[5];
	input->dword_stream[6] &= input_mask->dword_stream[6];
	input->dword_stream[7] &= input_mask->dword_stream[7];
	input->dword_stream[8] &= input_mask->dword_stream[8];
	input->dword_stream[9] &= input_mask->dword_stream[9];
	input->dword_stream[10] &= input_mask->dword_stream[10];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword by XOR-folding the masked stream */
	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
				    input->dword_stream[2] ^
				    input->dword_stream[3] ^
				    input->dword_stream[4] ^
				    input->dword_stream[5] ^
				    input->dword_stream[6] ^
				    input->dword_stream[7] ^
				    input->dword_stream[8] ^
				    input->dword_stream[9] ^
				    input->dword_stream[10]);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}

/**
 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
 * @input_mask: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
 * generate a correctly swapped value we need to bit swap the mask and that
 * is what is accomplished by this function.
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	/* dst port in upper 16 bits, src port in lower 16 bits */
	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
	/* classic bit-reversal: swap adjacent bits, pairs, nibbles, bytes */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))

/**
 * ixgbe_fdir_set_input_mask_82599 - Program FDIR mask registers
 * @hw: pointer to hardware structure
 * @input_mask: mask describing which fields participate in matching;
 *		all-zero fields are treated as fully masked
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_CONFIG on an unsupported partial mask.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	/* NOTE(review): trace name says "set_atr_input_mask" while the
	 * function is "set_input_mask" — historical mismatch in the string. */
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or
	 * src/dst_addr are zero, then assume a full mask for that field.
	 * Also assume that a VLAN of 0 is unspecified, so mask that out
	 * as well.  L4type cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* ports must also be fully masked when L4 type is masked */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_write_perfect_filter_82599 - Program a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter fields to program (expects bkt_hash already computed)
 * @soft_id: software index stored in the FDIRHASH SIG/SW field
 * @queue: Rx queue to direct matching traffic to
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter fields identifying the filter (bkt_hash must match)
 * @soft_id: software index used when the filter was written
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_FDIR_REINIT_FAILED if the query
 * command never completed.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd = 0;
	u32 retry_count;
	s32 err = IXGBE_SUCCESS;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	for (retry_count = 10; retry_count; retry_count--) {
		/* allow 10us for query to process */
		usec_delay(10);
		/* verify query completed successfully */
		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			break;
	}

	if (!retry_count)
		err = IXGBE_ERR_FDIR_REINIT_FAILED;

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return err;
}

/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_mask: mask for the input bitstream
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 *
 * Side effects: may overwrite input_mask->formatted.flow_type, and masks
 * *input in place via the hash computation.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue)
{
	s32 err = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue);
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	/* register value is in the low byte of CORECTL */
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82599");

	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation start_hw function.
 * Then performs revision-specific operations, if any.
 **/
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82599");

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	ret_val = ixgbe_start_hw_gen2(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = TRUE;

	if (ret_val == IXGBE_SUCCESS)
		ret_val = ixgbe_verify_fw_version_82599(hw);
out:
	return ret_val;
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If PHY already detected, maintains current PHY type in hw struct,
 * otherwise executes the PHY detection routine.
 **/
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;

	DEBUGFUNC("ixgbe_identify_phy_82599");

	/* Detect PHY if not unknown - returns success if already detected. */
	status = ixgbe_identify_phy_generic(hw);
	if (status != IXGBE_SUCCESS) {
		/* 82599 10GBASE-T requires an external PHY */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
			goto out;
		else
			/* not copper: try to identify an SFP module instead */
			status = ixgbe_identify_module_generic(hw);
	}

	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		status = IXGBE_SUCCESS;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		status = IXGBE_ERR_SFP_NOT_SUPPORTED;

out:
	return status;
}

/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 * Returns a bitmask of IXGBE_PHYSICAL_LAYER_* values.
 **/
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;
	u8 comp_codes_10g = 0;
	u8 comp_codes_1g = 0;

	/* NOTE(review): trace string says "get_support_..." — historical
	 * mismatch with the function name "get_supported_...". */
	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");

	hw->phy.ops.identify(hw);

	/* Copper PHYs report capabilities via the MDIO extended ability reg */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* otherwise derive capabilities from the AUTOC link mode selection */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		} else
			/* SFI mode so read SFP module */
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
		goto out;
		break;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
		break;
	default:
		goto out;
		break;
	}

sfp_check:
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode -  we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed */
	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		goto out;

	switch (hw->phy.type) {
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_sfp_active_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
		break;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* optical modules: read SFF-8472 compliance codes over I2C */
		hw->phy.ops.read_i2c_eeprom(hw,
		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
		hw->phy.ops.read_i2c_eeprom(hw,
		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{

	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
2066 */ 2067 2068 hw->mac.ops.disable_sec_rx_path(hw); 2069 2070 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2071 2072 hw->mac.ops.enable_sec_rx_path(hw); 2073 2074 return IXGBE_SUCCESS; 2075 } 2076 2077 /** 2078 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 2079 * @hw: pointer to hardware structure 2080 * 2081 * Verifies that installed the firmware version is 0.6 or higher 2082 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. 2083 * 2084 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or 2085 * if the FW version is not supported. 2086 **/ 2087 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) 2088 { 2089 s32 status = IXGBE_ERR_EEPROM_VERSION; 2090 u16 fw_offset, fw_ptp_cfg_offset; 2091 u16 fw_version = 0; 2092 2093 DEBUGFUNC("ixgbe_verify_fw_version_82599"); 2094 2095 /* firmware check is only necessary for SFI devices */ 2096 if (hw->phy.media_type != ixgbe_media_type_fiber) { 2097 status = IXGBE_SUCCESS; 2098 goto fw_version_out; 2099 } 2100 2101 /* get the offset to the Firmware Module block */ 2102 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2103 2104 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2105 goto fw_version_out; 2106 2107 /* get the offset to the Pass Through Patch Configuration block */ 2108 hw->eeprom.ops.read(hw, (fw_offset + 2109 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), 2110 &fw_ptp_cfg_offset); 2111 2112 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2113 goto fw_version_out; 2114 2115 /* get the firmware version */ 2116 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + 2117 IXGBE_FW_PATCH_VERSION_4), &fw_version); 2118 2119 if (fw_version > 0x5) 2120 status = IXGBE_SUCCESS; 2121 2122 fw_version_out: 2123 return status; 2124 } 2125 2126 /** 2127 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. 2128 * @hw: pointer to hardware structure 2129 * 2130 * Returns TRUE if the LESM FW module is present and enabled. Otherwise 2131 * returns FALSE. 
Smart Speed must be disabled if LESM FW module is enabled. 2132 **/ 2133 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2134 { 2135 bool lesm_enabled = FALSE; 2136 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2137 s32 status; 2138 2139 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); 2140 2141 /* get the offset to the Firmware Module block */ 2142 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2143 2144 if ((status != IXGBE_SUCCESS) || 2145 (fw_offset == 0) || (fw_offset == 0xFFFF)) 2146 goto out; 2147 2148 /* get the offset to the LESM Parameters block */ 2149 status = hw->eeprom.ops.read(hw, (fw_offset + 2150 IXGBE_FW_LESM_PARAMETERS_PTR), 2151 &fw_lesm_param_offset); 2152 2153 if ((status != IXGBE_SUCCESS) || 2154 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) 2155 goto out; 2156 2157 /* get the lesm state word */ 2158 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + 2159 IXGBE_FW_LESM_STATE_1), 2160 &fw_lesm_state); 2161 2162 if ((status == IXGBE_SUCCESS) && 2163 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) 2164 lesm_enabled = TRUE; 2165 2166 out: 2167 return lesm_enabled; 2168 } 2169 2170 /** 2171 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using 2172 * fastest available method 2173 * 2174 * @hw: pointer to hardware structure 2175 * @offset: offset of word in EEPROM to read 2176 * @words: number of words 2177 * @data: word(s) read from the EEPROM 2178 * 2179 * Retrieves 16 bit word(s) read from EEPROM 2180 **/ 2181 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, 2182 u16 words, u16 *data) 2183 { 2184 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2185 s32 ret_val = IXGBE_ERR_CONFIG; 2186 2187 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); 2188 2189 /* 2190 * If EEPROM is detected and can be addressed using 14 bits, 2191 * use EERD otherwise use bit bang 2192 */ 2193 if ((eeprom->type == ixgbe_eeprom_spi) && 2194 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) 2195 
ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, 2196 data); 2197 else 2198 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, 2199 words, 2200 data); 2201 2202 return ret_val; 2203 } 2204 2205 /** 2206 * ixgbe_read_eeprom_82599 - Read EEPROM word using 2207 * fastest available method 2208 * 2209 * @hw: pointer to hardware structure 2210 * @offset: offset of word in the EEPROM to read 2211 * @data: word read from the EEPROM 2212 * 2213 * Reads a 16 bit word from the EEPROM 2214 **/ 2215 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, 2216 u16 offset, u16 *data) 2217 { 2218 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2219 s32 ret_val = IXGBE_ERR_CONFIG; 2220 2221 DEBUGFUNC("ixgbe_read_eeprom_82599"); 2222 2223 /* 2224 * If EEPROM is detected and can be addressed using 14 bits, 2225 * use EERD otherwise use bit bang 2226 */ 2227 if ((eeprom->type == ixgbe_eeprom_spi) && 2228 (offset <= IXGBE_EERD_MAX_ADDR)) 2229 ret_val = ixgbe_read_eerd_generic(hw, offset, data); 2230 else 2231 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); 2232 2233 return ret_val; 2234 } 2235 2236