1 /****************************************************************************** 2 3 Copyright (c) 2001-2012, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 #include "ixgbe_type.h" 36 #include "ixgbe_82599.h" 37 #include "ixgbe_api.h" 38 #include "ixgbe_common.h" 39 #include "ixgbe_phy.h" 40 41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 42 ixgbe_link_speed speed, 43 bool autoneg, 44 bool autoneg_wait_to_complete); 45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, 47 u16 offset, u16 *data); 48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, 49 u16 words, u16 *data); 50 51 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 52 { 53 struct ixgbe_mac_info *mac = &hw->mac; 54 55 DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); 56 57 /* enable the laser control functions for SFP+ fiber */ 58 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { 59 mac->ops.disable_tx_laser = 60 &ixgbe_disable_tx_laser_multispeed_fiber; 61 mac->ops.enable_tx_laser = 62 &ixgbe_enable_tx_laser_multispeed_fiber; 63 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 64 65 } else { 66 mac->ops.disable_tx_laser = NULL; 67 mac->ops.enable_tx_laser = NULL; 68 mac->ops.flap_tx_laser = NULL; 69 } 70 71 if (hw->phy.multispeed_fiber) { 72 /* Set up dual speed SFP+ support */ 73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 74 } else { 75 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && 76 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 77 hw->phy.smart_speed == ixgbe_smart_speed_on) && 78 !ixgbe_verify_lesm_fw_enabled_82599(hw)) { 79 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; 80 } else { 81 mac->ops.setup_link = &ixgbe_setup_mac_link_82599; 82 } 83 } 84 } 85 86 /** 87 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init 88 * @hw: pointer to hardware structure 89 * 90 * Initialize any function pointers that were not able to be 91 * set during init_shared_code because 
the PHY/SFP type was 92 * not known. Perform the SFP init if necessary. 93 * 94 **/ 95 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) 96 { 97 struct ixgbe_mac_info *mac = &hw->mac; 98 struct ixgbe_phy_info *phy = &hw->phy; 99 s32 ret_val = IXGBE_SUCCESS; 100 101 DEBUGFUNC("ixgbe_init_phy_ops_82599"); 102 103 /* Identify the PHY or SFP module */ 104 ret_val = phy->ops.identify(hw); 105 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) 106 goto init_phy_ops_out; 107 108 /* Setup function pointers based on detected SFP module and speeds */ 109 ixgbe_init_mac_link_ops_82599(hw); 110 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) 111 hw->phy.ops.reset = NULL; 112 113 /* If copper media, overwrite with copper function pointers */ 114 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 115 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 116 mac->ops.get_link_capabilities = 117 &ixgbe_get_copper_link_capabilities_generic; 118 } 119 120 /* Set necessary function pointers based on phy type */ 121 switch (hw->phy.type) { 122 case ixgbe_phy_tn: 123 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 124 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 125 phy->ops.get_firmware_version = 126 &ixgbe_get_phy_firmware_version_tnx; 127 break; 128 default: 129 break; 130 } 131 init_phy_ops_out: 132 return ret_val; 133 } 134 135 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 136 { 137 s32 ret_val = IXGBE_SUCCESS; 138 u32 reg_anlp1 = 0; 139 u32 i = 0; 140 u16 list_offset, data_offset, data_value; 141 142 DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); 143 144 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 145 ixgbe_init_mac_link_ops_82599(hw); 146 147 hw->phy.ops.reset = NULL; 148 149 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 150 &data_offset); 151 if (ret_val != IXGBE_SUCCESS) 152 goto setup_sfp_out; 153 154 /* PHY config will finish before releasing the semaphore */ 155 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 156 IXGBE_GSSR_MAC_CSR_SM); 
157 if (ret_val != IXGBE_SUCCESS) { 158 ret_val = IXGBE_ERR_SWFW_SYNC; 159 goto setup_sfp_out; 160 } 161 162 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 163 while (data_value != 0xffff) { 164 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); 165 IXGBE_WRITE_FLUSH(hw); 166 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 167 } 168 169 /* Release the semaphore */ 170 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 171 /* Delay obtaining semaphore again to allow FW access */ 172 msec_delay(hw->eeprom.semaphore_delay); 173 174 /* Now restart DSP by setting Restart_AN and clearing LMS */ 175 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, 176 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | 177 IXGBE_AUTOC_AN_RESTART)); 178 179 /* Wait for AN to leave state 0 */ 180 for (i = 0; i < 10; i++) { 181 msec_delay(4); 182 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); 183 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) 184 break; 185 } 186 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { 187 DEBUGOUT("sfp module setup not complete\n"); 188 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 189 goto setup_sfp_out; 190 } 191 192 /* Restart DSP by setting Restart_AN and return to SFI mode */ 193 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, 194 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | 195 IXGBE_AUTOC_AN_RESTART)); 196 } 197 198 setup_sfp_out: 199 return ret_val; 200 } 201 202 /** 203 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type 204 * @hw: pointer to hardware structure 205 * 206 * Initialize the function pointers and assign the MAC type for 82599. 207 * Does not touch the hardware. 
 **/
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* NOTE(review): the first ret_val is overwritten by the second
	 * call; only the status of ixgbe_init_ops_generic() is returned. */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 table/queue limits */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* Manageability (ARC) is present when FWSM reports a FW mode */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = &ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @negotiation: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *negotiation)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module: these modules only do 1G. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	/* Decode the Link Mode Select field */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* KR and KX4 both contribute 10G; KX contributes 1G */
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*negotiation = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;	/* NOTE(review): unreachable after the goto above */
	}

	/* Multispeed fiber can always try both 10G and 1G */
	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached; the PHY type check
	 * takes precedence over the device-ID table below. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link
settings based on values in the ixgbe_hw struct. 450 * Restarts the link. Performs autonegotiation if needed. 451 **/ 452 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 453 bool autoneg_wait_to_complete) 454 { 455 u32 autoc_reg; 456 u32 links_reg; 457 u32 i; 458 s32 status = IXGBE_SUCCESS; 459 460 DEBUGFUNC("ixgbe_start_mac_link_82599"); 461 462 463 /* Restart link */ 464 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 465 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 466 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 467 468 /* Only poll for autoneg to complete if specified to do so */ 469 if (autoneg_wait_to_complete) { 470 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 471 IXGBE_AUTOC_LMS_KX4_KX_KR || 472 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 473 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 474 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 475 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 476 links_reg = 0; /* Just in case Autoneg time = 0 */ 477 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 478 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 479 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 480 break; 481 msec_delay(100); 482 } 483 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 484 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 485 DEBUGOUT("Autoneg did not complete.\n"); 486 } 487 } 488 } 489 490 /* Add delay to filter out noises during initial link setup */ 491 msec_delay(50); 492 493 return status; 494 } 495 496 /** 497 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 498 * @hw: pointer to hardware structure 499 * 500 * The base drivers may require better control over SFP+ module 501 * PHY states. This includes selectively shutting down the Tx 502 * laser on the PHY, effectively halting physical link. 
503 **/ 504 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 505 { 506 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 507 508 /* Disable tx laser; allow 100us to go dark per spec */ 509 esdp_reg |= IXGBE_ESDP_SDP3; 510 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 511 IXGBE_WRITE_FLUSH(hw); 512 usec_delay(100); 513 } 514 515 /** 516 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser 517 * @hw: pointer to hardware structure 518 * 519 * The base drivers may require better control over SFP+ module 520 * PHY states. This includes selectively turning on the Tx 521 * laser on the PHY, effectively starting physical link. 522 **/ 523 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 524 { 525 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 526 527 /* Enable tx laser; allow 100ms to light up */ 528 esdp_reg &= ~IXGBE_ESDP_SDP3; 529 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 530 IXGBE_WRITE_FLUSH(hw); 531 msec_delay(100); 532 } 533 534 /** 535 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 536 * @hw: pointer to hardware structure 537 * 538 * When the driver changes the link speeds that it can support, 539 * it sets autotry_restart to TRUE to indicate that we need to 540 * initiate a new autotry session with the link partner. To do 541 * so, we set the speed then disable and re-enable the tx laser, to 542 * alert the link partner that it also needs to restart autotry on its 543 * end. This is consistent with TRUE clause 37 autoneg, which also 544 * involves a loss of signal. 
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Only flap when a new autotry session has been requested */
	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed, bool autoneg,
					  bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = FALSE;
	bool negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 high selects 10G) */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 low selects 1G) */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
			highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed, bool autoneg,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed, bool autoneg,
			       bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 start_autoc = autoc;
	u32 orig_autoc = 0;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status != IXGBE_SUCCESS)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR stays disabled while SmartSpeed is active */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	/* Only touch the hardware if the computed AUTOC value changed */
	if (autoc != start_autoc) {
		/* Restart link */
		autoc |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
					      autoneg_wait_to_complete);
	/* Set up MAC; note the MAC link status is not folded into status */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	/*
	 * Only an unsupported SFP module is fatal here; other PHY init
	 * errors deliberately fall through so the MAC reset still runs.
	 */
	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	/* setup_sfp may also report an unsupported module - re-check */
	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* if the bit never cleared, record the failure but keep going */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
					IXGBE_AUTOC_AN_RESTART));

		/* only the "upper" half of AUTOC2 is restored */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1113 * @hw: pointer to hardware structure 1114 **/ 1115 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) 1116 { 1117 int i; 1118 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); 1119 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; 1120 1121 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); 1122 1123 /* 1124 * Before starting reinitialization process, 1125 * FDIRCMD.CMD must be zero. 1126 */ 1127 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { 1128 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1129 IXGBE_FDIRCMD_CMD_MASK)) 1130 break; 1131 usec_delay(10); 1132 } 1133 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1134 DEBUGOUT("Flow Director previous command isn't complete, " 1135 "aborting table re-initialization.\n"); 1136 return IXGBE_ERR_FDIR_REINIT_FAILED; 1137 } 1138 1139 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); 1140 IXGBE_WRITE_FLUSH(hw); 1141 /* 1142 * 82599 adapters flow director init flow cannot be restarted, 1143 * Workaround 82599 silicon errata by performing the following steps 1144 * before re-writing the FDIRCTRL control register with the same value. 1145 * - write 1 to bit 8 of FDIRCMD register & 1146 * - write 0 to bit 8 of FDIRCMD register 1147 */ 1148 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1149 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1150 IXGBE_FDIRCMD_CLEARHT)); 1151 IXGBE_WRITE_FLUSH(hw); 1152 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1153 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1154 ~IXGBE_FDIRCMD_CLEARHT)); 1155 IXGBE_WRITE_FLUSH(hw); 1156 /* 1157 * Clear FDIR Hash register to clear any leftover hashes 1158 * waiting to be programmed. 
1159 */ 1160 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); 1161 IXGBE_WRITE_FLUSH(hw); 1162 1163 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1164 IXGBE_WRITE_FLUSH(hw); 1165 1166 /* Poll init-done after we write FDIRCTRL register */ 1167 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1168 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1169 IXGBE_FDIRCTRL_INIT_DONE) 1170 break; 1171 usec_delay(10); 1172 } 1173 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1174 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1175 return IXGBE_ERR_FDIR_REINIT_FAILED; 1176 } 1177 1178 /* Clear FDIR statistics registers (read to clear) */ 1179 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); 1180 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); 1181 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 1182 IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 1183 IXGBE_READ_REG(hw, IXGBE_FDIRLEN); 1184 1185 return IXGBE_SUCCESS; 1186 } 1187 1188 /** 1189 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers 1190 * @hw: pointer to hardware structure 1191 * @fdirctrl: value to write to flow director control register 1192 **/ 1193 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1194 { 1195 int i; 1196 1197 DEBUGFUNC("ixgbe_fdir_enable_82599"); 1198 1199 /* Prime the keys for hashing */ 1200 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1201 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1202 1203 /* 1204 * Poll init-done after we write the register. Estimated times: 1205 * 10G: PBALLOC = 11b, timing is 60us 1206 * 1G: PBALLOC = 11b, timing is 600us 1207 * 100M: PBALLOC = 11b, timing is 6ms 1208 * 1209 * Multiple these timings by 4 if under full Rx load 1210 * 1211 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1212 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1213 * this might not finish in our poll time, but we can live with that 1214 * for now. 
1215 */ 1216 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1217 IXGBE_WRITE_FLUSH(hw); 1218 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1219 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1220 IXGBE_FDIRCTRL_INIT_DONE) 1221 break; 1222 msec_delay(1); 1223 } 1224 1225 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1226 DEBUGOUT("Flow Director poll time exceeded!\n"); 1227 } 1228 1229 /** 1230 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1231 * @hw: pointer to hardware structure 1232 * @fdirctrl: value to write to flow director control register, initially 1233 * contains just the value of the Rx packet buffer allocation 1234 **/ 1235 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1236 { 1237 DEBUGFUNC("ixgbe_init_fdir_signature_82599"); 1238 1239 /* 1240 * Continue setup of fdirctrl register bits: 1241 * Move the flexible bytes to use the ethertype - shift 6 words 1242 * Set the maximum length per hash bucket to 0xA filters 1243 * Send interrupt when 64 filters are left 1244 */ 1245 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | 1246 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | 1247 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); 1248 1249 /* write hashes and fdirctrl register, poll for completion */ 1250 ixgbe_fdir_enable_82599(hw, fdirctrl); 1251 1252 return IXGBE_SUCCESS; 1253 } 1254 1255 /** 1256 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1257 * @hw: pointer to hardware structure 1258 * @fdirctrl: value to write to flow director control register, initially 1259 * contains just the value of the Rx packet buffer allocation 1260 **/ 1261 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1262 { 1263 DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); 1264 1265 /* 1266 * Continue setup of fdirctrl register bits: 1267 * Turn perfect match filtering on 1268 * Report hash in RSS field of Rx wb descriptor 1269 * Initialize the drop queue 1270 * Move the flexible bytes to use the ethertype 
- shift 6 words 1271 * Set the maximum length per hash bucket to 0xA filters 1272 * Send interrupt when 64 (0x4 * 16) filters are left 1273 */ 1274 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | 1275 IXGBE_FDIRCTRL_REPORT_STATUS | 1276 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | 1277 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | 1278 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | 1279 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); 1280 1281 /* write hashes and fdirctrl register, poll for completion */ 1282 ixgbe_fdir_enable_82599(hw, fdirctrl); 1283 1284 return IXGBE_SUCCESS; 1285 } 1286 1287 /* 1288 * These defines allow us to quickly generate all of the necessary instructions 1289 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION 1290 * for values 0 through 15 1291 */ 1292 #define IXGBE_ATR_COMMON_HASH_KEY \ 1293 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) 1294 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ 1295 do { \ 1296 u32 n = (_n); \ 1297 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ 1298 common_hash ^= lo_hash_dword >> n; \ 1299 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1300 bucket_hash ^= lo_hash_dword >> n; \ 1301 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ 1302 sig_hash ^= lo_hash_dword << (16 - n); \ 1303 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ 1304 common_hash ^= hi_hash_dword >> n; \ 1305 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1306 bucket_hash ^= hi_hash_dword >> n; \ 1307 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1308 sig_hash ^= hi_hash_dword << (16 - n); \ 1309 } while (0); 1310 1311 /** 1312 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1313 * @stream: input bitstream to compute the hash on 1314 * 1315 * This function is almost identical to the function above but contains 1316 * several optomizations such as unwinding all of the loops, letting the 1317 * compiler work out all of the conditional ifs since the keys are 
static 1318 * defines, and computing two keys at once since the hashed dword stream 1319 * will be the same for both keys. 1320 **/ 1321 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, 1322 union ixgbe_atr_hash_dword common) 1323 { 1324 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1325 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; 1326 1327 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1328 flow_vm_vlan = IXGBE_NTOHL(input.dword); 1329 1330 /* generate common hash dword */ 1331 hi_hash_dword = IXGBE_NTOHL(common.dword); 1332 1333 /* low dword is word swapped version of common */ 1334 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1335 1336 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1337 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1338 1339 /* Process bits 0 and 16 */ 1340 IXGBE_COMPUTE_SIG_HASH_ITERATION(0); 1341 1342 /* 1343 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1344 * delay this because bit 0 of the stream should not be processed 1345 * so we do not add the vlan until after bit 0 was processed 1346 */ 1347 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1348 1349 /* Process remaining 30 bit of the key */ 1350 IXGBE_COMPUTE_SIG_HASH_ITERATION(1); 1351 IXGBE_COMPUTE_SIG_HASH_ITERATION(2); 1352 IXGBE_COMPUTE_SIG_HASH_ITERATION(3); 1353 IXGBE_COMPUTE_SIG_HASH_ITERATION(4); 1354 IXGBE_COMPUTE_SIG_HASH_ITERATION(5); 1355 IXGBE_COMPUTE_SIG_HASH_ITERATION(6); 1356 IXGBE_COMPUTE_SIG_HASH_ITERATION(7); 1357 IXGBE_COMPUTE_SIG_HASH_ITERATION(8); 1358 IXGBE_COMPUTE_SIG_HASH_ITERATION(9); 1359 IXGBE_COMPUTE_SIG_HASH_ITERATION(10); 1360 IXGBE_COMPUTE_SIG_HASH_ITERATION(11); 1361 IXGBE_COMPUTE_SIG_HASH_ITERATION(12); 1362 IXGBE_COMPUTE_SIG_HASH_ITERATION(13); 1363 IXGBE_COMPUTE_SIG_HASH_ITERATION(14); 1364 IXGBE_COMPUTE_SIG_HASH_ITERATION(15); 1365 1366 /* combine common_hash result with signature and bucket hashes */ 1367 bucket_hash ^= common_hash; 
1368 bucket_hash &= IXGBE_ATR_HASH_MASK; 1369 1370 sig_hash ^= common_hash << 16; 1371 sig_hash &= IXGBE_ATR_HASH_MASK << 16; 1372 1373 /* return completed signature hash */ 1374 return sig_hash ^ bucket_hash; 1375 } 1376 1377 /** 1378 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1379 * @hw: pointer to hardware structure 1380 * @input: unique input dword 1381 * @common: compressed common input dword 1382 * @queue: queue index to direct traffic to 1383 **/ 1384 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1385 union ixgbe_atr_hash_dword input, 1386 union ixgbe_atr_hash_dword common, 1387 u8 queue) 1388 { 1389 u64 fdirhashcmd; 1390 u32 fdircmd; 1391 1392 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); 1393 1394 /* 1395 * Get the flow_type in order to program FDIRCMD properly 1396 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 1397 */ 1398 switch (input.formatted.flow_type) { 1399 case IXGBE_ATR_FLOW_TYPE_TCPV4: 1400 case IXGBE_ATR_FLOW_TYPE_UDPV4: 1401 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 1402 case IXGBE_ATR_FLOW_TYPE_TCPV6: 1403 case IXGBE_ATR_FLOW_TYPE_UDPV6: 1404 case IXGBE_ATR_FLOW_TYPE_SCTPV6: 1405 break; 1406 default: 1407 DEBUGOUT(" Error on flow type input\n"); 1408 return IXGBE_ERR_CONFIG; 1409 } 1410 1411 /* configure FDIRCMD register */ 1412 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1413 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1414 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1415 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1416 1417 /* 1418 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1419 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
1420 */ 1421 fdirhashcmd = (u64)fdircmd << 32; 1422 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); 1423 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1424 1425 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); 1426 1427 return IXGBE_SUCCESS; 1428 } 1429 1430 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ 1431 do { \ 1432 u32 n = (_n); \ 1433 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1434 bucket_hash ^= lo_hash_dword >> n; \ 1435 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1436 bucket_hash ^= hi_hash_dword >> n; \ 1437 } while (0); 1438 1439 /** 1440 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash 1441 * @atr_input: input bitstream to compute the hash on 1442 * @input_mask: mask for the input bitstream 1443 * 1444 * This function serves two main purposes. First it applys the input_mask 1445 * to the atr_input resulting in a cleaned up atr_input data stream. 1446 * Secondly it computes the hash and stores it in the bkt_hash field at 1447 * the end of the input byte stream. This way it will be available for 1448 * future use without needing to recompute the hash. 
1449 **/ 1450 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 1451 union ixgbe_atr_input *input_mask) 1452 { 1453 1454 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1455 u32 bucket_hash = 0; 1456 1457 /* Apply masks to input data */ 1458 input->dword_stream[0] &= input_mask->dword_stream[0]; 1459 input->dword_stream[1] &= input_mask->dword_stream[1]; 1460 input->dword_stream[2] &= input_mask->dword_stream[2]; 1461 input->dword_stream[3] &= input_mask->dword_stream[3]; 1462 input->dword_stream[4] &= input_mask->dword_stream[4]; 1463 input->dword_stream[5] &= input_mask->dword_stream[5]; 1464 input->dword_stream[6] &= input_mask->dword_stream[6]; 1465 input->dword_stream[7] &= input_mask->dword_stream[7]; 1466 input->dword_stream[8] &= input_mask->dword_stream[8]; 1467 input->dword_stream[9] &= input_mask->dword_stream[9]; 1468 input->dword_stream[10] &= input_mask->dword_stream[10]; 1469 1470 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1471 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); 1472 1473 /* generate common hash dword */ 1474 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^ 1475 input->dword_stream[2] ^ 1476 input->dword_stream[3] ^ 1477 input->dword_stream[4] ^ 1478 input->dword_stream[5] ^ 1479 input->dword_stream[6] ^ 1480 input->dword_stream[7] ^ 1481 input->dword_stream[8] ^ 1482 input->dword_stream[9] ^ 1483 input->dword_stream[10]); 1484 1485 /* low dword is word swapped version of common */ 1486 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1487 1488 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1489 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1490 1491 /* Process bits 0 and 16 */ 1492 IXGBE_COMPUTE_BKT_HASH_ITERATION(0); 1493 1494 /* 1495 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1496 * delay this because bit 0 of the stream should not be processed 1497 * so we do not add the vlan until after bit 0 was processed 1498 */ 1499 
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1500 1501 /* Process remaining 30 bit of the key */ 1502 IXGBE_COMPUTE_BKT_HASH_ITERATION(1); 1503 IXGBE_COMPUTE_BKT_HASH_ITERATION(2); 1504 IXGBE_COMPUTE_BKT_HASH_ITERATION(3); 1505 IXGBE_COMPUTE_BKT_HASH_ITERATION(4); 1506 IXGBE_COMPUTE_BKT_HASH_ITERATION(5); 1507 IXGBE_COMPUTE_BKT_HASH_ITERATION(6); 1508 IXGBE_COMPUTE_BKT_HASH_ITERATION(7); 1509 IXGBE_COMPUTE_BKT_HASH_ITERATION(8); 1510 IXGBE_COMPUTE_BKT_HASH_ITERATION(9); 1511 IXGBE_COMPUTE_BKT_HASH_ITERATION(10); 1512 IXGBE_COMPUTE_BKT_HASH_ITERATION(11); 1513 IXGBE_COMPUTE_BKT_HASH_ITERATION(12); 1514 IXGBE_COMPUTE_BKT_HASH_ITERATION(13); 1515 IXGBE_COMPUTE_BKT_HASH_ITERATION(14); 1516 IXGBE_COMPUTE_BKT_HASH_ITERATION(15); 1517 1518 /* 1519 * Limit hash to 13 bits since max bucket count is 8K. 1520 * Store result at the end of the input stream. 1521 */ 1522 input->formatted.bkt_hash = bucket_hash & 0x1FFF; 1523 } 1524 1525 /** 1526 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks 1527 * @input_mask: mask to be bit swapped 1528 * 1529 * The source and destination port masks for flow director are bit swapped 1530 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to 1531 * generate a correctly swapped value we need to bit swap the mask and that 1532 * is what is accomplished by this function. 
/**
 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
 * @input_mask: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
 * generate a correctly swapped value we need to bit swap the mask and that
 * is what is accomplished by this function.
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	/* dst port mask in the upper half, src port mask in the lower half */
	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
	/*
	 * Parallel bit reversal: swap 1-, 2-, 4- then 8-bit groups.  All
	 * four stages exchange bits only within 16-bit lanes, so each port
	 * mask ends up bit-reversed in place within its own half.
	 */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))

/**
 * ixgbe_fdir_set_input_mask_82599 - Program FDIR field mask registers
 * @hw: pointer to hardware structure
 * @input_mask: mask describing which fields of a filter are "don't care"
 *
 * Returns IXGBE_ERR_CONFIG if a partial mask is requested for a field
 * the hardware can only mask fully (vm pool, L4 ports, VLAN, flex bytes).
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fallthrough */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fallthrough */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fallthrough */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fallthrough */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fallthrough */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_write_perfect_filter_82599 - Program a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter fields to program (bkt_hash must already be computed)
 * @soft_id: software index for the filter
 * @queue: Rx queue index to direct matching traffic to
 *
 * The register write order matters: all filter fields are programmed and
 * flushed before FDIRCMD is written, since the FDIRCMD write triggers the
 * hardware to latch the filter.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter fields identifying the filter (bkt_hash used)
 * @soft_id: software index of the filter to remove
 *
 * Queries the hardware for the filter first and only issues the remove
 * command if the filter is actually present.  Returns
 * IXGBE_ERR_FDIR_REINIT_FAILED if the query never completes.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd = 0;
	u32 retry_count;
	s32 err = IXGBE_SUCCESS;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	for (retry_count = 10; retry_count; retry_count--) {
		/* allow 10us for query to process */
		usec_delay(10);
		/* verify query completed successfully */
		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			break;
	}

	if (!retry_count)
		err = IXGBE_ERR_FDIR_REINIT_FAILED;

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return err;
}

/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_mask: mask for the input bitstream
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue)
{
	s32 err = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fallthrough */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue);
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	/* the low byte of CORECTL holds the register's value */
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82599");

	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation start_hw function.
 * Then performs revision-specific operations, if any.
1875 **/ 1876 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) 1877 { 1878 s32 ret_val = IXGBE_SUCCESS; 1879 1880 DEBUGFUNC("ixgbe_start_hw_82599"); 1881 1882 ret_val = ixgbe_start_hw_generic(hw); 1883 if (ret_val != IXGBE_SUCCESS) 1884 goto out; 1885 1886 ret_val = ixgbe_start_hw_gen2(hw); 1887 if (ret_val != IXGBE_SUCCESS) 1888 goto out; 1889 1890 /* We need to run link autotry after the driver loads */ 1891 hw->mac.autotry_restart = TRUE; 1892 1893 if (ret_val == IXGBE_SUCCESS) 1894 ret_val = ixgbe_verify_fw_version_82599(hw); 1895 out: 1896 return ret_val; 1897 } 1898 1899 /** 1900 * ixgbe_identify_phy_82599 - Get physical layer module 1901 * @hw: pointer to hardware structure 1902 * 1903 * Determines the physical layer module found on the current adapter. 1904 * If PHY already detected, maintains current PHY type in hw struct, 1905 * otherwise executes the PHY detection routine. 1906 **/ 1907 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1908 { 1909 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1910 1911 DEBUGFUNC("ixgbe_identify_phy_82599"); 1912 1913 /* Detect PHY if not unknown - returns success if already detected. 
*/ 1914 status = ixgbe_identify_phy_generic(hw); 1915 if (status != IXGBE_SUCCESS) { 1916 /* 82599 10GBASE-T requires an external PHY */ 1917 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) 1918 goto out; 1919 else 1920 status = ixgbe_identify_module_generic(hw); 1921 } 1922 1923 /* Set PHY type none if no PHY detected */ 1924 if (hw->phy.type == ixgbe_phy_unknown) { 1925 hw->phy.type = ixgbe_phy_none; 1926 status = IXGBE_SUCCESS; 1927 } 1928 1929 /* Return error if SFP module has been detected but is not supported */ 1930 if (hw->phy.type == ixgbe_phy_sfp_unsupported) 1931 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1932 1933 out: 1934 return status; 1935 } 1936 1937 /** 1938 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type 1939 * @hw: pointer to hardware structure 1940 * 1941 * Determines physical layer capabilities of the current configuration. 1942 **/ 1943 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) 1944 { 1945 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1946 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1947 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 1948 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 1949 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; 1950 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 1951 u16 ext_ability = 0; 1952 u8 comp_codes_10g = 0; 1953 u8 comp_codes_1g = 0; 1954 1955 DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); 1956 1957 hw->phy.ops.identify(hw); 1958 1959 switch (hw->phy.type) { 1960 case ixgbe_phy_tn: 1961 case ixgbe_phy_cu_unknown: 1962 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, 1963 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); 1964 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) 1965 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1966 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) 1967 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; 1968 if (ext_ability & 
IXGBE_MDIO_PHY_100BASETX_ABILITY) 1969 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1970 goto out; 1971 default: 1972 break; 1973 } 1974 1975 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1976 case IXGBE_AUTOC_LMS_1G_AN: 1977 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 1978 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { 1979 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | 1980 IXGBE_PHYSICAL_LAYER_1000BASE_BX; 1981 goto out; 1982 } else 1983 /* SFI mode so read SFP module */ 1984 goto sfp_check; 1985 break; 1986 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 1987 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) 1988 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 1989 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) 1990 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 1991 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) 1992 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; 1993 goto out; 1994 break; 1995 case IXGBE_AUTOC_LMS_10G_SERIAL: 1996 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { 1997 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; 1998 goto out; 1999 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) 2000 goto sfp_check; 2001 break; 2002 case IXGBE_AUTOC_LMS_KX4_KX_KR: 2003 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 2004 if (autoc & IXGBE_AUTOC_KX_SUPP) 2005 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; 2006 if (autoc & IXGBE_AUTOC_KX4_SUPP) 2007 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2008 if (autoc & IXGBE_AUTOC_KR_SUPP) 2009 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2010 goto out; 2011 break; 2012 default: 2013 goto out; 2014 break; 2015 } 2016 2017 sfp_check: 2018 /* SFP check must be done last since DA modules are sometimes used to 2019 * test KR mode - we need to id KR mode correctly before SFP module. 
2020 * Call identify_sfp because the pluggable module may have changed */ 2021 hw->phy.ops.identify_sfp(hw); 2022 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) 2023 goto out; 2024 2025 switch (hw->phy.type) { 2026 case ixgbe_phy_sfp_passive_tyco: 2027 case ixgbe_phy_sfp_passive_unknown: 2028 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 2029 break; 2030 case ixgbe_phy_sfp_ftl_active: 2031 case ixgbe_phy_sfp_active_unknown: 2032 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; 2033 break; 2034 case ixgbe_phy_sfp_avago: 2035 case ixgbe_phy_sfp_ftl: 2036 case ixgbe_phy_sfp_intel: 2037 case ixgbe_phy_sfp_unknown: 2038 hw->phy.ops.read_i2c_eeprom(hw, 2039 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); 2040 hw->phy.ops.read_i2c_eeprom(hw, 2041 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); 2042 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 2043 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 2044 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 2045 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 2046 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) 2047 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; 2048 else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) 2049 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; 2050 break; 2051 default: 2052 break; 2053 } 2054 2055 out: 2056 return physical_layer; 2057 } 2058 2059 /** 2060 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 2061 * @hw: pointer to hardware structure 2062 * @regval: register value to write to RXCTRL 2063 * 2064 * Enables the Rx DMA unit for 82599 2065 **/ 2066 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) 2067 { 2068 2069 DEBUGFUNC("ixgbe_enable_rx_dma_82599"); 2070 2071 /* 2072 * Workaround for 82599 silicon errata when enabling the Rx datapath. 2073 * If traffic is incoming before we enable the Rx unit, it could hang 2074 * the Rx DMA unit. Therefore, make sure the security engine is 2075 * completely disabled prior to enabling the Rx unit. 
2076 */ 2077 2078 hw->mac.ops.disable_sec_rx_path(hw); 2079 2080 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2081 2082 hw->mac.ops.enable_sec_rx_path(hw); 2083 2084 return IXGBE_SUCCESS; 2085 } 2086 2087 /** 2088 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 2089 * @hw: pointer to hardware structure 2090 * 2091 * Verifies that installed the firmware version is 0.6 or higher 2092 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. 2093 * 2094 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or 2095 * if the FW version is not supported. 2096 **/ 2097 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) 2098 { 2099 s32 status = IXGBE_ERR_EEPROM_VERSION; 2100 u16 fw_offset, fw_ptp_cfg_offset; 2101 u16 fw_version = 0; 2102 2103 DEBUGFUNC("ixgbe_verify_fw_version_82599"); 2104 2105 /* firmware check is only necessary for SFI devices */ 2106 if (hw->phy.media_type != ixgbe_media_type_fiber) { 2107 status = IXGBE_SUCCESS; 2108 goto fw_version_out; 2109 } 2110 2111 /* get the offset to the Firmware Module block */ 2112 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2113 2114 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2115 goto fw_version_out; 2116 2117 /* get the offset to the Pass Through Patch Configuration block */ 2118 hw->eeprom.ops.read(hw, (fw_offset + 2119 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), 2120 &fw_ptp_cfg_offset); 2121 2122 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2123 goto fw_version_out; 2124 2125 /* get the firmware version */ 2126 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + 2127 IXGBE_FW_PATCH_VERSION_4), &fw_version); 2128 2129 if (fw_version > 0x5) 2130 status = IXGBE_SUCCESS; 2131 2132 fw_version_out: 2133 return status; 2134 } 2135 2136 /** 2137 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. 2138 * @hw: pointer to hardware structure 2139 * 2140 * Returns TRUE if the LESM FW module is present and enabled. Otherwise 2141 * returns FALSE. 
Smart Speed must be disabled if LESM FW module is enabled. 2142 **/ 2143 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2144 { 2145 bool lesm_enabled = FALSE; 2146 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2147 s32 status; 2148 2149 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); 2150 2151 /* get the offset to the Firmware Module block */ 2152 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2153 2154 if ((status != IXGBE_SUCCESS) || 2155 (fw_offset == 0) || (fw_offset == 0xFFFF)) 2156 goto out; 2157 2158 /* get the offset to the LESM Parameters block */ 2159 status = hw->eeprom.ops.read(hw, (fw_offset + 2160 IXGBE_FW_LESM_PARAMETERS_PTR), 2161 &fw_lesm_param_offset); 2162 2163 if ((status != IXGBE_SUCCESS) || 2164 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) 2165 goto out; 2166 2167 /* get the lesm state word */ 2168 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + 2169 IXGBE_FW_LESM_STATE_1), 2170 &fw_lesm_state); 2171 2172 if ((status == IXGBE_SUCCESS) && 2173 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) 2174 lesm_enabled = TRUE; 2175 2176 out: 2177 return lesm_enabled; 2178 } 2179 2180 /** 2181 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using 2182 * fastest available method 2183 * 2184 * @hw: pointer to hardware structure 2185 * @offset: offset of word in EEPROM to read 2186 * @words: number of words 2187 * @data: word(s) read from the EEPROM 2188 * 2189 * Retrieves 16 bit word(s) read from EEPROM 2190 **/ 2191 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, 2192 u16 words, u16 *data) 2193 { 2194 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2195 s32 ret_val = IXGBE_ERR_CONFIG; 2196 2197 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); 2198 2199 /* 2200 * If EEPROM is detected and can be addressed using 14 bits, 2201 * use EERD otherwise use bit bang 2202 */ 2203 if ((eeprom->type == ixgbe_eeprom_spi) && 2204 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) 2205 
ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, 2206 data); 2207 else 2208 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, 2209 words, 2210 data); 2211 2212 return ret_val; 2213 } 2214 2215 /** 2216 * ixgbe_read_eeprom_82599 - Read EEPROM word using 2217 * fastest available method 2218 * 2219 * @hw: pointer to hardware structure 2220 * @offset: offset of word in the EEPROM to read 2221 * @data: word read from the EEPROM 2222 * 2223 * Reads a 16 bit word from the EEPROM 2224 **/ 2225 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, 2226 u16 offset, u16 *data) 2227 { 2228 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2229 s32 ret_val = IXGBE_ERR_CONFIG; 2230 2231 DEBUGFUNC("ixgbe_read_eeprom_82599"); 2232 2233 /* 2234 * If EEPROM is detected and can be addressed using 14 bits, 2235 * use EERD otherwise use bit bang 2236 */ 2237 if ((eeprom->type == ixgbe_eeprom_spi) && 2238 (offset <= IXGBE_EERD_MAX_ADDR)) 2239 ret_val = ixgbe_read_eerd_generic(hw, offset, data); 2240 else 2241 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); 2242 2243 return ret_val; 2244 } 2245 2246 2247