/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
#define IXGBE_82599_RAR_ENTRIES   128
#define IXGBE_82599_MC_TBL_SIZE   128
#define IXGBE_82599_VFT_TBL_SIZE  128
#define IXGBE_82599_RX_PB_SIZE	  512

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				     u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				      u8 dev_addr, u8 data);

void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
				       ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
					ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
					       ixgbe_set_hard_rate_select_speed;
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
					       ixgbe_set_soft_rate_select_speed;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		    !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access;
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}

/**
 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: Return whether we locked for this read.
 * @reg_val: Value we read from AUTOC
 *
 * For this part (82599) we need to wrap read-modify-writes with a possible
 * FW/SW lock.  It is assumed this lock will be freed with the next
 * prot_autoc_write_82599().
 */
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	s32 ret_val;

	*locked = FALSE;
	/* If LESM is on then we need to hold the SW/FW semaphore. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		*locked = TRUE;
	}

	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC.  Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
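
/*
 * Illustrative sketch (not part of the upstream driver): the two helpers
 * above are meant to be used as a pair, so an AUTOC read-modify-write holds
 * the FW/SW semaphore across the whole sequence when LESM is enabled.  A
 * caller would look roughly like this (IXGBE_AUTOC_AN_RESTART is just an
 * example bit to flip):
 *
 *	bool locked;
 *	u32 autoc;
 *
 *	if (hw->mac.ops.prot_autoc_read(hw, &locked, &autoc) ==
 *	    IXGBE_SUCCESS) {
 *		autoc |= IXGBE_AUTOC_AN_RESTART;
 *		// the write releases the lock taken by the read
 *		hw->mac.ops.prot_autoc_write(hw, autoc, locked);
 *	}
 */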

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}
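
/*
 * Illustrative sketch (not part of the upstream driver): after
 * ixgbe_init_ops_82599() has filled in the function-pointer tables, callers
 * stay MAC-agnostic by dispatching through them rather than calling the
 * 82599-specific implementations directly:
 *
 *	struct ixgbe_hw *hw = ...;	// hypothetical, already identified
 *
 *	ixgbe_init_ops_82599(hw);
 *	hw->mac.ops.reset_hw(hw);	// resolves to ixgbe_reset_hw_82599
 *	hw->mac.ops.start_hw(hw);	// resolves to ixgbe_start_hw_82599
 */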

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation
		 * Limited autoneg is enabled at 1G
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}
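
/*
 * Illustrative sketch (not part of the upstream driver): @speed comes back
 * as a bitmask, so a caller typically tests individual IXGBE_LINK_SPEED_*
 * bits rather than comparing for equality:
 *
 *	ixgbe_link_speed speed;
 *	bool autoneg;
 *
 *	if (ixgbe_get_link_capabilities_82599(hw, &speed, &autoneg) ==
 *	    IXGBE_SUCCESS) {
 *		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 *			DEBUGOUT("10Gb/s capable\n");
 *		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 *			DEBUGOUT("1Gb/s capable\n");
 *	}
 */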

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		media_type = ixgbe_media_type_fiber_qsfp;
		break;
	case IXGBE_DEV_ID_82599_BYPASS:
		media_type = ixgbe_media_type_fiber_fixed;
		hw->phy.multispeed_fiber = TRUE;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 * @hw: pointer to hardware structure
 *
 * Disables link during D3 power down sequence.
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

out:
	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to TRUE to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed then disable and re-enable the Tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end.  This is consistent with TRUE clause 37 autoneg, which also
 * involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_set_hard_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via RS0/RS1 rate select pins.
 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	/* holds the value of the AUTOC register at this current point in time */
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
	u32 autoc = current_autoc; /* temporary variable used for comparison purposes */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
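
/*
 * Illustrative sketch (not part of the upstream driver): @speed is a
 * bitmask, so a caller can advertise several rates at once and let the
 * function above intersect them with what AUTOC says the part supports:
 *
 *	ixgbe_link_speed speed = IXGBE_LINK_SPEED_10GB_FULL |
 *				 IXGBE_LINK_SPEED_1GB_FULL;
 *
 *	// hw->mac.ops.setup_link points here (or at the SmartSpeed
 *	// wrapper) after ixgbe_init_mac_link_ops_82599()
 *	hw->mac.ops.setup_link(hw, speed, TRUE);
 */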

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.
	 * Otherwise restore the stored original values since the reset
	 * operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support we need to
		 * leave LMS in the state it was before the MAC reset.
		 * Likewise if we support WoL we don't want to change
		 * the LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return IXGBE_SUCCESS;
		usec_delay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * The 82599 adapter's flow director init flow cannot be restarted.
	 * Work around this 82599 silicon errata by performing the following
	 * steps before re-writing the FDIRCTRL control register with the
	 * same value:
	 * - write 1 to bit 8 of the FDIRCMD register, then
	 * - write 0 to bit 8 of the FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *	10G: PBALLOC = 11b, timing is 60us
	 *	 1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 * @cloud_mode: TRUE - cloud mode, FALSE - other mode
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
				  bool cloud_mode)
{
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Report hash in RSS field of Rx wb descriptor
	 *  Initialize the drop queue to queue 127
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	if (cloud_mode)
		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
			     IXGBE_FDIRCTRL_FILTERMODE_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}
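
/*
 * Illustrative sketch (not part of the upstream driver): callers seed
 * @fdirctrl with just the Rx packet buffer allocation and let the init
 * routines above OR in the remaining fields.  IXGBE_FDIRCTRL_PBALLOC_64K
 * is assumed here to be the 64KB allocation encoding from ixgbe_type.h:
 *
 *	ixgbe_init_fdir_signature_82599(hw, IXGBE_FDIRCTRL_PBALLOC_64K);
 */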

/**
 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
 * @hw: pointer to hardware structure
 * @dropqueue: Rx queue index used for the dropped packets
 **/
void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
{
	u32 fdirctrl;

	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");

	/* Clear init done bit and drop queue field */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);

	/* Set drop queue */
	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	if ((hw->mac.type == ixgbe_mac_X550) ||
	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
	    (hw->mac.type == ixgbe_mac_X550EM_a))
		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: unique input dword
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_hash_dword input,
					   union ixgbe_atr_hash_dword common,
					   u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6,
	 * fifth is FDIRCMD.TUNNEL_FILTER
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return;
}

#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
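
/*
 * Illustrative sketch (not part of the upstream driver): programming an ATR
 * signature filter only needs the two compressed dwords; the hash itself is
 * computed inside ixgbe_fdir_add_signature_filter_82599().  The field values
 * below are made up for illustration, and IXGBE_HTONS is assumed from
 * ixgbe_osdep.h:
 *
 *	union ixgbe_atr_hash_dword input = { .dword = 0 };
 *	union ixgbe_atr_hash_dword common = { .dword = 0 };
 *
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	common.port.src = IXGBE_HTONS(1024);	// hypothetical ports
 *	common.port.dst = IXGBE_HTONS(80);
 *
 *	ixgbe_fdir_add_signature_filter_82599(hw, input, common, rx_queue);
 */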

/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 14; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}

/**
 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
 * @input_mask: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc.  In order
 * to generate a correctly swapped value we need to bit swap the mask and
 * that is what is accomplished by this function.
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);

	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
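
/*
 * Illustrative walk-through (not part of the upstream driver) of the
 * mask-and-shift reversal above: each step swaps progressively larger
 * groups (bits, pairs, nibbles, bytes), which reverses the bits within
 * each 16-bit half of the word.  For example, a mask of 0x00000001:
 *
 *	0x00000001 -> 0x00000002	// swap adjacent bits
 *	0x00000002 -> 0x00000008	// swap 2-bit pairs
 *	0x00000008 -> 0x00000080	// swap nibbles
 *	0x00000080 -> 0x00008000	// swap bytes
 *
 * i.e. bit 0 of each 16-bit port mask ends up in bit 15 of the same half,
 * which is exactly the swap the FDIRTCPM register layout expects.
 */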
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result, on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
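/*
 * Worked example: IXGBE_STORE_AS_BE32(0x12345678) evaluates to 0x78563412,
 * i.e. a full byte reversal.  IXGBE_WRITE_REG_BE32() first applies
 * IXGBE_NTOHL(), so on little-endian hosts the two swaps cancel and a
 * value already stored big-endian (such as an IP address taken from a
 * packet) reaches the register with its byte order intact; on big-endian
 * hosts IXGBE_NTOHL() is a no-op and the explicit swap does the work.
 */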
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask,
				    bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;

	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or
	 * src/dst_addr are zero, then assume a full mask for that field.
	 * Also assume that a VLAN of 0 is unspecified, so mask that out
	 * as well.  L4type cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at
	 * this point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
			/* fall through */
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask vxlan id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			/* fall through */
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/*
		 * Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
		 * L3/L3 packets to tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
			break;
		default:
			break;
		}
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/*
		 * store the TCP/UDP port masks, bit reversed from port
		 * layout
		 */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
	}
	return IXGBE_SUCCESS;
}
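/*
 * Illustrative sketch (not compiled): programming a mask that matches on
 * the 5-tuple (full src/dst IPv4 addresses and ports plus L4 type) while
 * masking out VM pool, VLAN, and flex bytes.  Assumes the usual formatted
 * view of union ixgbe_atr_input; all-ones port/address masks are
 * endian-neutral, so no byte swapping is needed here.
 */
#if 0
static s32 example_set_5tuple_mask(struct ixgbe_hw *hw)
{
	union ixgbe_atr_input input_mask;

	memset(&input_mask, 0, sizeof(input_mask));
	input_mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
					 IXGBE_ATR_L4TYPE_MASK;
	input_mask.formatted.src_ip[0] = 0xFFFFFFFF;
	input_mask.formatted.dst_ip[0] = 0xFFFFFFFF;
	input_mask.formatted.src_port = 0xFFFF;
	input_mask.formatted.dst_port = 0xFFFF;

	/* vlan_id and flex_bytes left zero => those fields are masked out */
	return ixgbe_fdir_set_input_mask_82599(hw, &input_mask, FALSE);
}
#endif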
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue,
					  bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
				     input->formatted.src_ip[0]);

		/*
		 * record the first 32 bits of the destination address
		 * (big-endian)
		 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
				     input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian) */
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes (big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.tni_vni);
	}

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}

s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}
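/*
 * Illustrative sketch (not compiled): lifetime of a perfect filter.  The
 * same input and soft_id used to write a filter are needed to erase it,
 * since FDIRHASH (bucket hash plus soft_id) identifies the entry.  This
 * assumes "flow" was fully populated and hashed via
 * ixgbe_atr_compute_perfect_hash_82599() before the write, and that the
 * caller serializes these calls as required.
 */
#if 0
static void example_filter_lifetime(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *flow)
{
	u16 soft_id = 42;	/* hypothetical software index */
	u8 queue = 3;		/* hypothetical Rx queue */

	if (ixgbe_fdir_write_perfect_filter_82599(hw, flow, soft_id,
						  queue, FALSE))
		return;

	/* ... later, when the flow goes away ... */
	ixgbe_fdir_erase_perfect_filter_82599(hw, flow, soft_id);
}
#endif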
/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_mask: mask for the input bitstream
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 * @cloud_mode: TRUE when the filter targets a cloud/tunneled flow
 *
 * Note that the caller to this function must lock before calling, since
 * the hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue,
					bool cloud_mode)
{
	s32 err = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the
	 * hardware if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue,
						     cloud_mode);
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82599");

	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}
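/*
 * Illustrative sketch (not compiled): read-modify-write of an analog
 * register through the CORECTL window above.  The register offset and
 * the bit being set are hypothetical placeholders; real offsets are
 * defined by the Omer analog block.
 */
#if 0
static void example_analog_rmw(struct ixgbe_hw *hw, u32 reg)
{
	u8 val;

	if (ixgbe_read_analog_reg8_82599(hw, reg, &val) != IXGBE_SUCCESS)
		return;
	val |= 0x01;	/* hypothetical bit */
	ixgbe_write_analog_reg8_82599(hw, reg, val);
}
#endif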
/**
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation 2 start_hw function.
 * Then performs revision-specific operations, if any.
 **/
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82599");

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	ret_val = ixgbe_start_hw_gen2(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = TRUE;

	ret_val = ixgbe_verify_fw_version_82599(hw);
out:
	return ret_val;
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If a PHY has already been detected, maintains the current PHY type in
 * the hw struct, otherwise executes the PHY detection routine.
 **/
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_identify_phy_82599");

	/* Detect PHY if not unknown - returns success if already detected. */
	status = ixgbe_identify_phy_generic(hw);
	if (status != IXGBE_SUCCESS) {
		/* 82599 10GBASE-T requires an external PHY */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
			return status;
		else
			status = ixgbe_identify_module_generic(hw);
	}

	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		return IXGBE_SUCCESS;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	return status;
}
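/*
 * Illustrative sketch (not compiled): typical bring-up ordering for the
 * two routines above.  Real drivers reach these through the mac.ops and
 * top-level init paths; direct calls are shown here only for clarity.
 */
#if 0
static s32 example_bring_up(struct ixgbe_hw *hw)
{
	s32 err;

	err = ixgbe_start_hw_82599(hw);
	if (err != IXGBE_SUCCESS)
		return err;	/* may be IXGBE_ERR_EEPROM_VERSION */

	err = ixgbe_identify_phy_82599(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		return err;	/* refuse to run with an unsupported module */

	return IXGBE_SUCCESS;
}
#endif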
/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");

	hw->phy.ops.identify(hw);

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
				     &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
					 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		} else {
			/* SFI mode so read SFP module */
			goto sfp_check;
		}
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
		goto out;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
	default:
		goto out;
	}

sfp_check:
	/*
	 * SFP check must be done last since DA modules are sometimes used to
	 * test KR mode - we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed
	 */
	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
out:
	return physical_layer;
}
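/*
 * Illustrative sketch (not compiled): the return value above is a
 * bitmask, not an enum, so multiple physical layers can be reported at
 * once (e.g. KX + KX4 + KR in the backplane auto modes).  Callers test
 * individual bits.
 */
#if 0
static void example_report_layers(struct ixgbe_hw *hw)
{
	u64 layers = ixgbe_get_supported_physical_layer_82599(hw);

	if (layers & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		DEBUGOUT("10GBASE-T supported\n");
	if (layers & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		DEBUGOUT("10GBASE-KR supported\n");
	if (layers == IXGBE_PHYSICAL_LAYER_UNKNOWN)
		DEBUGOUT("no known physical layer detected\n");
}
#endif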
/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	hw->mac.ops.disable_sec_rx_path(hw);

	if (regval & IXGBE_RXCTRL_RXEN)
		ixgbe_enable_rx(hw);
	else
		ixgbe_disable_rx(hw);

	hw->mac.ops.enable_sec_rx_path(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
 * @hw: pointer to hardware structure
 *
 * Verifies that the installed firmware version is 0.6 or higher for SFI
 * devices.  All 82599 SFI devices should have version 0.6 or higher.
 *
 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
 * if the FW version is not supported.
 **/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM_VERSION;
	u16 fw_offset, fw_ptp_cfg_offset;
	u16 fw_version;

	DEBUGFUNC("ixgbe_verify_fw_version_82599");

	/* firmware check is only necessary for SFI devices */
	if (hw->phy.media_type != ixgbe_media_type_fiber) {
		status = IXGBE_SUCCESS;
		goto fw_version_out;
	}

	/* get the offset to the Firmware Module block */
	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
		goto fw_version_out;

	/* get the offset to the Pass Through Patch Configuration block */
	if (hw->eeprom.ops.read(hw, (fw_offset +
				     IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
				&fw_ptp_cfg_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_offset +
			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
		goto fw_version_out;

	/* get the firmware version */
	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
				     IXGBE_FW_PATCH_VERSION_4),
				&fw_version)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	if (fw_version > 0x5)
		status = IXGBE_SUCCESS;

fw_version_out:
	return status;
}
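/*
 * The firmware version lookup above is a two-level pointer walk through
 * the EEPROM, roughly:
 *
 *   word[IXGBE_FW_PTR] ------------------------------> FW module block
 *   FW block + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR -> PTP config block
 *   PTP block + IXGBE_FW_PATCH_VERSION_4 ------------> fw_version word
 *
 * A pointer of 0x0000 or 0xFFFF at any level means "not present", in
 * which case the function falls through with the version-error status.
 */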
/**
 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
 * @hw: pointer to hardware structure
 *
 * Returns TRUE if the LESM FW module is present and enabled.  Otherwise
 * returns FALSE.  Smart Speed must be disabled if LESM FW module is enabled.
 **/
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
	bool lesm_enabled = FALSE;
	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
	s32 status;

	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");

	/* get the offset to the Firmware Module block */
	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);

	if ((status != IXGBE_SUCCESS) ||
	    (fw_offset == 0) || (fw_offset == 0xFFFF))
		goto out;

	/* get the offset to the LESM Parameters block */
	status = hw->eeprom.ops.read(hw, (fw_offset +
					  IXGBE_FW_LESM_PARAMETERS_PTR),
				     &fw_lesm_param_offset);

	if ((status != IXGBE_SUCCESS) ||
	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
		goto out;

	/* get the LESM state word */
	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
					  IXGBE_FW_LESM_STATE_1),
				     &fw_lesm_state);

	if ((status == IXGBE_SUCCESS) &&
	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
		lesm_enabled = TRUE;

out:
	return lesm_enabled;
}

/**
 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of word in EEPROM to read
 * @words: number of words
 * @data: word(s) read from the EEPROM
 *
 * Retrieves 16 bit word(s) read from EEPROM
 **/
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");

	/*
	 * If EEPROM is detected and can be addressed using 14 bits,
	 * use EERD; otherwise, use bit bang
	 */
	if ((eeprom->type == ixgbe_eeprom_spi) &&
	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
							 data);
	else
		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
								    words,
								    data);

	return ret_val;
}

/**
 * ixgbe_read_eeprom_82599 - Read EEPROM word using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM
 **/
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_read_eeprom_82599");

	/*
	 * If EEPROM is detected and can be addressed using 14 bits,
	 * use EERD; otherwise, use bit bang
	 */
	if ((eeprom->type == ixgbe_eeprom_spi) &&
	    (offset <= IXGBE_EERD_MAX_ADDR))
		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
	else
		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);

	return ret_val;
}
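/*
 * Illustrative sketch (not compiled): these readers are static because
 * callers are expected to go through the eeprom.ops table, which is
 * pointed at them during init.  A consumer reading, say, the firmware
 * pointer word might look like the following (the single-word offset is
 * the real IXGBE_FW_PTR; the buffer read is a hypothetical example).
 */
#if 0
static void example_eeprom_reads(struct ixgbe_hw *hw)
{
	u16 fw_ptr;
	u16 buf[4];

	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_ptr))
		return;
	/* multi-word variant; dispatches to EERD or bit-bang as above */
	hw->eeprom.ops.read_buffer(hw, fw_ptr, 4, buf);
}
#endif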
/**
 * ixgbe_reset_pipeline_82599 - perform pipeline reset
 *
 * @hw: pointer to hardware structure
 *
 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
 * full pipeline reset.  This function assumes the SW/FW lock is held.
 **/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 anlp1_reg = 0;
	u32 i, autoc_reg, autoc2_reg;

	/* Enable link if disabled in NVM */
	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
		IXGBE_WRITE_FLUSH(hw);
	}

	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
	/* Wait for AN to leave state 0 */
	for (i = 0; i < 10; i++) {
		msec_delay(4);
		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
			break;
	}

	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
		DEBUGOUT("auto negotiation not completed\n");
		ret_val = IXGBE_ERR_RESET_FAILED;
		goto reset_pipeline_out;
	}

	ret_val = IXGBE_SUCCESS;

reset_pipeline_out:
	/* Write AUTOC register with original LMS field and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return ret_val;
}

/**
 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: I2C device address of the target
 * @data: value read
 *
 * Performs byte read operation to SFP module's EEPROM over I2C interface at
 * a specified device address.
 **/
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				     u8 dev_addr, u8 *data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_read_i2c_byte_82599");

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Acquire I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;

			msec_delay(5);
			timeout--;
		}

		if (!timeout) {
			DEBUGOUT("Driver can't access resource,"
				 " acquiring I2C bus timeout.\n");
			status = IXGBE_ERR_I2C;
			goto release_i2c_access;
		}
	}

	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:
	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Release I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}
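/*
 * Shared QSFP+ I2C bus handshake used above (and in the write path
 * below): the driver requests bus ownership by driving SDP0 high, then
 * polls SDP1 until firmware grants access (up to 200 * 5 ms = 1 s), and
 * releases the bus by driving SDP0 low again when the transfer is done.
 */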
/**
 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: I2C device address of the target
 * @data: value to write
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface at
 * a specified device address.
 **/
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				      u8 dev_addr, u8 data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_write_i2c_byte_82599");

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Acquire I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;

			msec_delay(5);
			timeout--;
		}

		if (!timeout) {
			DEBUGOUT("Driver can't access resource,"
				 " acquiring I2C bus timeout.\n");
			status = IXGBE_ERR_I2C;
			goto release_i2c_access;
		}
	}

	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:
	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Release I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}