1 /****************************************************************************** 2 3 Copyright (c) 2001-2010, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 #include "ixgbe_type.h" 36 #include "ixgbe_api.h" 37 #include "ixgbe_common.h" 38 #include "ixgbe_phy.h" 39 40 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); 41 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 42 ixgbe_link_speed *speed, 43 bool *autoneg); 44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); 45 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 46 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 47 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 48 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 49 ixgbe_link_speed speed, bool autoneg, 50 bool autoneg_wait_to_complete); 51 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 52 ixgbe_link_speed speed, bool autoneg, 53 bool autoneg_wait_to_complete); 54 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 55 bool autoneg_wait_to_complete); 56 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 57 ixgbe_link_speed speed, 58 bool autoneg, 59 bool autoneg_wait_to_complete); 60 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 61 ixgbe_link_speed speed, 62 bool autoneg, 63 bool autoneg_wait_to_complete); 64 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); 65 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); 66 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); 67 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); 68 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); 69 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw); 70 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); 71 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); 72 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); 73 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw 
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);

void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/* enable the laser control functions for SFP+ fiber */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
		mac->ops.disable_tx_laser =
		    &ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
		    &ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		    !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
		    &ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on phy type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		    &ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_aq:
		phy->ops.get_firmware_version =
		    &ixgbe_get_phy_firmware_version_generic;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg_anlp1 = 0;
	u32 i = 0;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}

		/* Release the semaphore */
		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Now restart DSP by setting Restart_AN and clearing LMS */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
		    IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
		    IXGBE_AUTOC_AN_RESTART));

		/* Wait for AN to leave state 0 */
		for (i = 0; i < 10; i++) {
			msec_delay(4);
			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
				break;
		}
		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

		/* Restart DSP by setting Restart_AN and returning to SFI mode */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
		    IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
		    IXGBE_AUTOC_AN_RESTART));
	}

setup_sfp_out:
	return ret_val;
}

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
	    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	return ret_val;
}
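
/*
 * Minimal usage sketch (not taken from this file): a probe path would
 * typically wire up the ops table first and then drive the hardware only
 * through the function pointers installed above.  Assumes the caller has
 * already mapped the device and filled in hw->hw_addr and hw->device_id;
 * error handling is abbreviated.
 *
 *	s32 err;
 *
 *	err = ixgbe_init_ops_82599(hw);
 *	if (err == IXGBE_SUCCESS)
 *		err = hw->mac.ops.reset_hw(hw);  // ixgbe_reset_hw_82599
 *	if (err == IXGBE_SUCCESS)
 *		err = hw->mac.ops.start_hw(hw);  // ixgbe_start_hw_rev_1_82599
 */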

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @negotiation: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *negotiation)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*negotiation = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
		    IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Disable tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to TRUE to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed then disable and re-enable the tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end.  This is consistent with true clause 37 autoneg, which also
 * involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}
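
/*
 * Sketch of the autotry handshake described above (assumes a multispeed
 * fiber setup where mac->ops.flap_tx_laser was installed by
 * ixgbe_init_mac_link_ops_82599); not code from this file:
 *
 *	hw->mac.autotry_restart = TRUE;
 *	if (hw->mac.ops.flap_tx_laser)
 *		hw->mac.ops.flap_tx_laser(hw);  // drops and restores signal
 */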

/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed, bool autoneg,
					  bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = FALSE;
	bool negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried
	 * (if there was more than one), and call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
		    highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
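
/*
 * Illustrative call, not from this file (assumes a dual-speed SFP+ module,
 * so mac->ops.setup_link points at the routine above): request both rates
 * and let the autotry logic settle on the highest one that links.
 *
 *	status = hw->mac.ops.setup_link(hw,
 *	    IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
 *	    TRUE, TRUE);
 */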

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed, bool autoneg,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed, bool autoneg,
			       bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 start_autoc = autoc;
	u32 orig_autoc = 0;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status != IXGBE_SUCCESS)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != start_autoc) {
		/* Restart link */
		autoc |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /* Just in case Autoneg time=0 */
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					    IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	(void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link
 * (MAC) reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 i;
	u32 autoc;
	u32 autoc2;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	(void) ixgbe_disable_pcie_master(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master().  The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		usec_delay(1);
		goto mac_reset_top;
	}

	msec_delay(50);

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
			    IXGBE_AUTOC_AN_RESTART));

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		    IXGBE_FDIRCMD_CMD_MASK))
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
			 "aborting table re-initialization.\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * The 82599 adapter's flow director init flow cannot be restarted.
	 * Work around this silicon erratum by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value:
	 * - write 1 to bit 8 of the FDIRCMD register, then
	 * - write 0 to bit 8 of the FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @pballoc: which mode to allocate filters with
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
{
	u32 fdirctrl = 0;
	u32 pbsize;
	int i;

	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced.  The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	/* Send interrupt when 64 filters are left */
	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;

	/* Set the maximum length per hash bucket to 0xA filters */
	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;

	switch (pballoc) {
	case IXGBE_FDIR_PBALLOC_64K:
		/* 8k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
		break;
	case IXGBE_FDIR_PBALLOC_128K:
		/* 16k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
		break;
	case IXGBE_FDIR_PBALLOC_256K:
		/* 32k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		break;
	default:
		/* bad value */
		return IXGBE_ERR_CONFIG;
	}

	/* Move the flexible bytes to use the ethertype - shift 6 words */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");

	return IXGBE_SUCCESS;
}
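
/*
 * Minimal sketch of enabling signature mode (not code from this file).
 * Per the switch above, 64 KB of filter memory yields 8k - 1 signature
 * filters; everything besides the PBALLOC constant is illustrative.
 *
 *	if (ixgbe_init_fdir_signature_82599(hw, IXGBE_FDIR_PBALLOC_64K) !=
 *	    IXGBE_SUCCESS)
 *		DEBUGOUT("Flow Director signature init failed\n");
 */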

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @pballoc: which mode to allocate filters with
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
{
	u32 fdirctrl = 0;
	u32 pbsize;
	int i;

	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced.  The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	/* Send interrupt when 64 filters are left */
	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;

	/* Initialize the drop queue to Rx queue 127 */
	fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);

	switch (pballoc) {
	case IXGBE_FDIR_PBALLOC_64K:
		/* 2k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
		break;
	case IXGBE_FDIR_PBALLOC_128K:
		/* 4k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
		break;
	case IXGBE_FDIR_PBALLOC_256K:
		/* 8k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		break;
	default:
		/* bad value */
		return IXGBE_ERR_CONFIG;
	}

	/* Turn perfect match filtering on */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;

	/* Move the flexible bytes to use the ethertype - shift 6 words */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */

	/* Set the maximum length per hash bucket to 0xA filters */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director Perfect poll time exceeded!\n");

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
 * @atr_input: input bitstream to compute the hash on
 * @key: 32-bit hash key
 **/
u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
				 u32 key)
{
	/*
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *
	 *    if (key[0])
	 *        hash[15:0] ^= Stream[15:0];
	 *
	 *    for (i = 0; i < 16; i++) {
	 *        if (key[i])
	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        if (key[i + 16])
	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
	 *    }
	 */
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 hash_result = 0;
	u8 i;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 10; i; i -= 2)
		common_hash_dword ^= atr_input->dword_stream[i] ^
				     atr_input->dword_stream[i - 1];

	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	if (key & 0x0001)
		hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* process the remaining 30 bits in the key 2 bits at a time */
	for (i = 15; i; i--) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
	}

	return hash_result & IXGBE_ATR_HASH_MASK;
}
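
/*
 * Sketch of how the perfect-filter path below consumes this routine: the
 * bucket hash is computed with the bucket key, and the result is already
 * masked to IXGBE_ATR_HASH_MASK, so it can be or'ed straight into FDIRHASH.
 * Illustrative only, not code from this file.
 *
 *	u32 bucket_hash = ixgbe_atr_compute_hash_82599(input,
 *	    IXGBE_ATR_BUCKET_HASH_KEY);
 */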

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
{ \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
}

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: unique input dword
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 **/
static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
					    union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue)
{
	u64 fdirhashcmd;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 */
	switch (input.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return IXGBE_SUCCESS;
}
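
/*
 * Usage sketch (e.g. from a transmit hot path); not code from this file.
 * Only fields the hash actually consumes need to be filled in, and the
 * queue variable is an assumption for illustration.
 *
 *	union ixgbe_atr_hash_dword input;
 *	union ixgbe_atr_hash_dword common;
 *
 *	input.dword = 0;
 *	common.dword = 0;
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	(void) ixgbe_fdir_add_signature_filter_82599(hw, input, common,
 *	    rx_queue);
 */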

/**
 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
 * @input_masks: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, and so on.  In
 * order to generate a correctly swapped value we need to bit swap the mask
 * and that is what is accomplished by this function.
 **/
static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
{
	u32 mask = IXGBE_NTOHS(input_masks->dst_port_mask);
	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= IXGBE_NTOHS(input_masks->src_port_mask);
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	(((u16)(_value) >> 8) | ((u16)(_value) << 8))

/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_masks: masks for the input bitstream
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					struct ixgbe_atr_input_masks *input_masks,
					u16 soft_id, u8 queue)
{
	u32 fdirhash;
	u32 fdircmd;
	u32 fdirport, fdirtcpm;
	u32 fdirvlan;
	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
		    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the
	 * hardware if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;
		/* FALLTHRU */
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/*
	 * Program the relevant mask registers.  If src/dst_port or
	 * src/dst_addr are zero, then assume a full mask for that field.
	 * Also assume that a VLAN of 0 is unspecified, so mask that out as
	 * well.  L4type cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* Program FDIRM */
	switch (IXGBE_NTOHS(input_masks->vlan_id_mask) & 0xEFFF) {
	case 0xEFFF:
		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		/* FALLTHRU */
	case 0xE000:
		/* Unmask VLAN prio - bit 1 */
		fdirm &= ~IXGBE_FDIRM_VLANP;
		break;
	case 0x0FFF:
		/* Unmask VLAN ID - bit 0 */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		break;
	case 0x0000:
		/* do nothing, vlans already masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (input_masks->flex_mask & 0xFFFF) {
		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
			DEBUGOUT(" Error on flexible byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* Unmask Flex Bytes - bit 4 */
		fdirm &= ~IXGBE_FDIRM_FLEX;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_masks->src_ip_mask[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_masks->dst_ip_mask[0]);

	/* Apply masks to input data */
	input->formatted.vlan_id &= input_masks->vlan_id_mask;
	input->formatted.flex_bytes &= input_masks->flex_mask;
	input->formatted.src_port &= input_masks->src_port_mask;
	input->formatted.dst_port &= input_masks->dst_port_mask;
	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];

	/* record vlan (little-endian) and flex_bytes (big-endian) */
	fdirvlan =
	    IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes));
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* record source and destination port (little-endian) */
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/* we only want the bucket hash so drop the upper 16 bits */
	fdirhash = ixgbe_atr_compute_hash_82599(input,
						IXGBE_ATR_BUCKET_HASH_KEY);
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}
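
/*
 * Minimal sketch of a perfect-filter add (not code from this file; the
 * caller must hold its own lock, per the note above).  Field values are
 * illustrative, and addresses, ports and masks are expected in network
 * (big-endian) order; src_ip_be32/dst_ip_be32 are hypothetical values.
 *
 *	union ixgbe_atr_input filter;
 *	struct ixgbe_atr_input_masks masks;
 *
 *	memset(&filter, 0, sizeof(filter));
 *	memset(&masks, 0, sizeof(masks));
 *	filter.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	filter.formatted.src_ip[0] = src_ip_be32;
 *	filter.formatted.dst_ip[0] = dst_ip_be32;
 *	masks.src_ip_mask[0] = 0xffffffff;	// match these exactly
 *	masks.dst_ip_mask[0] = 0xffffffff;
 *	(void) ixgbe_fdir_add_perfect_filter_82599(hw, &filter, &masks,
 *	    soft_id, rx_queue);
 */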
	/* Apply masks to input data */
	input->formatted.vlan_id &= input_masks->vlan_id_mask;
	input->formatted.flex_bytes &= input_masks->flex_mask;
	input->formatted.src_port &= input_masks->src_port_mask;
	input->formatted.dst_port &= input_masks->dst_port_mask;
	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];

	/* record VLAN (little-endian) and flex_bytes (big-endian) */
	fdirvlan =
		IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes));
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* record source and destination port (little-endian) */
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/* we only want the bucket hash so drop the upper 16 bits */
	fdirhash = ixgbe_atr_compute_hash_82599(input,
						IXGBE_ATR_BUCKET_HASH_KEY);
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82599");

	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}
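/*
 * Usage sketch (illustrative only; SOME_ANALOG_REG is a hypothetical
 * placeholder, not a register defined in this driver): a read-modify-write
 * of an analog register combines the two accessors above.
 *
 *	u8 analog;
 *
 *	ixgbe_read_analog_reg8_82599(hw, SOME_ANALOG_REG, &analog);
 *	analog |= 0x01;		// set the desired control bit
 *	ixgbe_write_analog_reg8_82599(hw, SOME_ANALOG_REG, analog);
 */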
/**
 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation 2 start_hw function.
 * Then performs revision-specific operations, if any.
 **/
s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("ixgbe_start_hw_rev_1_82599");

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	ret_val = ixgbe_start_hw_gen2(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = TRUE;

	ret_val = ixgbe_verify_fw_version_82599(hw);
out:
	return ret_val;
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If PHY already detected, maintains current PHY type in hw struct,
 * otherwise executes the PHY detection routine.
 **/
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_identify_phy_82599");

	/* Detect PHY if not unknown; returns success if already detected */
	status = ixgbe_identify_phy_generic(hw);
	if (status != IXGBE_SUCCESS) {
		/* 82599 10GBASE-T requires an external PHY */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
			goto out;
		else
			status = ixgbe_identify_sfp_module_generic(hw);
	}

	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		status = IXGBE_SUCCESS;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		status = IXGBE_ERR_SFP_NOT_SUPPORTED;

out:
	return status;
}
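/*
 * Caller sketch (added for illustration; the error handling shown is a
 * minimal example, not code from this file).  Note that "no PHY found" is
 * reported as success with hw->phy.type == ixgbe_phy_none, while an
 * unsupported SFP module is a distinct error:
 *
 *	s32 err = ixgbe_identify_phy_82599(hw);
 *
 *	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
 *		DEBUGOUT("unsupported SFP+ module type\n");
 *	else if (err != IXGBE_SUCCESS)
 *		DEBUGOUT("PHY identification failed\n");
 */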
/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;
	u8 comp_codes_10g = 0;
	u8 comp_codes_1g = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");

	hw->phy.ops.identify(hw);

	/* Copper PHYs report their abilities through MDIO */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
				     &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
					 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		}
		/* SFI mode so read SFP module */
		goto sfp_check;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
		goto out;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
	default:
		goto out;
	}
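	/*
	 * Added commentary: control reaches sfp_check below only for the
	 * serial/SFI link modes, i.e. when AUTOC/AUTOC2 alone cannot
	 * determine the physical layer and the SFP module EEPROM must be
	 * consulted.
	 */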
sfp_check:
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode - we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed */
	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		goto out;

	switch (hw->phy.type) {
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_sfp_active_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
		break;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		hw->phy.ops.read_i2c_eeprom(hw,
		    IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
		hw->phy.ops.read_i2c_eeprom(hw,
		    IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
#define IXGBE_MAX_SECRX_POLL 30
	int i;
	u32 secrxreg;

	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		/* Use interrupt-safe sleep just in case */
		usec_delay(10);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
			 "path fully disabled.  Continuing with init.\n");

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
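/*
 * Usage sketch (illustrative, not code from this file): a caller that
 * stops the Rx path, reconfigures it, and restarts it would go through
 * this routine so that the security-block errata workaround is applied:
 *
 *	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 *
 *	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 *	// ... reprogram Rx filters and queues ...
 *	ixgbe_enable_rx_dma_82599(hw, rxctrl | IXGBE_RXCTRL_RXEN);
 */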
/**
 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
 * @hw: pointer to hardware structure
 *
 * Verifies that the installed firmware version is 0.6 or higher
 * for SFI devices.  All 82599 SFI devices should have version 0.6 or higher.
 *
 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
 * if the FW version is not supported.
 **/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM_VERSION;
	u16 fw_offset, fw_ptp_cfg_offset;
	u16 fw_version = 0;

	DEBUGFUNC("ixgbe_verify_fw_version_82599");

	/* firmware check is only necessary for SFI devices */
	if (hw->phy.media_type != ixgbe_media_type_fiber) {
		status = IXGBE_SUCCESS;
		goto fw_version_out;
	}

	/* get the offset to the Firmware Module block */
	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);

	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
		goto fw_version_out;

	/* get the offset to the Pass Through Patch Configuration block */
	hw->eeprom.ops.read(hw, (fw_offset +
			    IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
			    &fw_ptp_cfg_offset);

	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
		goto fw_version_out;

	/* get the firmware version */
	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
			    IXGBE_FW_PATCH_VERSION_4),
			    &fw_version);

	if (fw_version > 0x5)
		status = IXGBE_SUCCESS;

fw_version_out:
	return status;
}

/**
 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
 * @hw: pointer to hardware structure
 *
 * Returns TRUE if the LESM FW module is present and enabled.  Otherwise
 * returns FALSE.  Smart Speed must be disabled if LESM FW module is enabled.
 **/
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
	bool lesm_enabled = FALSE;
	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
	s32 status;

	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");

	/* get the offset to the Firmware Module block */
	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);

	if ((status != IXGBE_SUCCESS) ||
	    (fw_offset == 0) || (fw_offset == 0xFFFF))
		goto out;

	/* get the offset to the LESM Parameters block */
	status = hw->eeprom.ops.read(hw, (fw_offset +
				     IXGBE_FW_LESM_PARAMETERS_PTR),
				     &fw_lesm_param_offset);

	if ((status != IXGBE_SUCCESS) ||
	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
		goto out;

	/* get the LESM state word */
	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
				     IXGBE_FW_LESM_STATE_1),
				     &fw_lesm_state);

	if ((status == IXGBE_SUCCESS) &&
	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
		lesm_enabled = TRUE;

out:
	return lesm_enabled;
}
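/*
 * Added commentary: both EEPROM walks above use the same pointer-chasing
 * pattern, sketched here with a hypothetical word offset SOME_WORD_PTR:
 *
 *	u16 module, param;
 *
 *	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &module);
 *	if (module == 0 || module == 0xFFFF)
 *		return;			// module block absent
 *	hw->eeprom.ops.read(hw, module + SOME_WORD_PTR, &param);
 *
 * A pointer word of 0x0000 or 0xFFFF marks an unprogrammed block, so each
 * level is validated before it is dereferenced.
 */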