1 /****************************************************************************** 2 SPDX-License-Identifier: BSD-3-Clause 3 4 Copyright (c) 2001-2020, Intel Corporation 5 All rights reserved. 6 7 Redistribution and use in source and binary forms, with or without 8 modification, are permitted provided that the following conditions are met: 9 10 1. Redistributions of source code must retain the above copyright notice, 11 this list of conditions and the following disclaimer. 12 13 2. Redistributions in binary form must reproduce the above copyright 14 notice, this list of conditions and the following disclaimer in the 15 documentation and/or other materials provided with the distribution. 16 17 3. Neither the name of the Intel Corporation nor the names of its 18 contributors may be used to endorse or promote products derived from 19 this software without specific prior written permission. 20 21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 POSSIBILITY OF SUCH DAMAGE. 32 33 ******************************************************************************/ 34 35 #include "ixgbe_type.h" 36 #include "ixgbe_82599.h" 37 #include "ixgbe_api.h" 38 #include "ixgbe_common.h" 39 #include "ixgbe_phy.h" 40 41 #define IXGBE_82599_MAX_TX_QUEUES 128 42 #define IXGBE_82599_MAX_RX_QUEUES 128 43 #define IXGBE_82599_RAR_ENTRIES 128 44 #define IXGBE_82599_MC_TBL_SIZE 128 45 #define IXGBE_82599_VFT_TBL_SIZE 128 46 #define IXGBE_82599_RX_PB_SIZE 512 47 48 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 49 ixgbe_link_speed speed, 50 bool autoneg_wait_to_complete); 51 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 52 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, 53 u16 offset, u16 *data); 54 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, 55 u16 words, u16 *data); 56 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 57 u8 dev_addr, u8 *data); 58 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 59 u8 dev_addr, u8 data); 60 61 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 62 { 63 struct ixgbe_mac_info *mac = &hw->mac; 64 65 DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); 66 67 /* 68 * enable the laser control functions for SFP+ fiber 69 * and MNG not enabled 70 */ 71 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && 72 !ixgbe_mng_enabled(hw)) { 73 mac->ops.disable_tx_laser = 74 ixgbe_disable_tx_laser_multispeed_fiber; 75 mac->ops.enable_tx_laser = 76 ixgbe_enable_tx_laser_multispeed_fiber; 77 mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; 78 79 } else { 80 mac->ops.disable_tx_laser = NULL; 81 mac->ops.enable_tx_laser = NULL; 82 mac->ops.flap_tx_laser = NULL; 83 } 84 85 if (hw->phy.multispeed_fiber) { 86 /* Set up dual speed SFP+ support */ 87 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; 88 mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; 89 mac->ops.set_rate_select_speed = 90 ixgbe_set_hard_rate_select_speed; 91 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed) 92 mac->ops.set_rate_select_speed = 93 ixgbe_set_soft_rate_select_speed; 94 } else { 95 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && 96 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 97 hw->phy.smart_speed == ixgbe_smart_speed_on) && 98 !ixgbe_verify_lesm_fw_enabled_82599(hw)) { 99 mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; 100 } else { 101 mac->ops.setup_link = ixgbe_setup_mac_link_82599; 102 } 103 } 104 } 105 106 /** 107 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init 108 * @hw: pointer to hardware structure 109 * 110 * Initialize any function pointers that were not able to be 111 * set during init_shared_code because the PHY/SFP type was 112 * not known. Perform the SFP init if necessary. 113 * 114 **/ 115 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) 116 { 117 struct ixgbe_mac_info *mac = &hw->mac; 118 struct ixgbe_phy_info *phy = &hw->phy; 119 s32 ret_val = IXGBE_SUCCESS; 120 u32 esdp; 121 122 DEBUGFUNC("ixgbe_init_phy_ops_82599"); 123 124 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { 125 /* Store flag indicating I2C bus access control unit. */ 126 hw->phy.qsfp_shared_i2c_bus = true; 127 128 /* Initialize access to QSFP+ I2C bus */ 129 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 130 esdp |= IXGBE_ESDP_SDP0_DIR; 131 esdp &= ~IXGBE_ESDP_SDP1_DIR; 132 esdp &= ~IXGBE_ESDP_SDP0; 133 esdp &= ~IXGBE_ESDP_SDP0_NATIVE; 134 esdp &= ~IXGBE_ESDP_SDP1_NATIVE; 135 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 136 IXGBE_WRITE_FLUSH(hw); 137 138 phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; 139 phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; 140 } 141 /* Identify the PHY or SFP module */ 142 ret_val = phy->ops.identify(hw); 143 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) 144 goto init_phy_ops_out; 145 146 /* Setup function pointers based on detected SFP module and speeds */ 147 ixgbe_init_mac_link_ops_82599(hw); 148 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) 149 hw->phy.ops.reset = NULL; 150 151 /* If copper media, overwrite with copper function pointers */ 152 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 153 mac->ops.setup_link = ixgbe_setup_copper_link_82599; 154 mac->ops.get_link_capabilities = 155 ixgbe_get_copper_link_capabilities_generic; 156 } 157 158 /* Set necessary function pointers based on PHY type */ 159 switch (hw->phy.type) { 160 case ixgbe_phy_tn: 161 phy->ops.setup_link = ixgbe_setup_phy_link_tnx; 162 phy->ops.check_link = ixgbe_check_phy_link_tnx; 163 phy->ops.get_firmware_version = 164 ixgbe_get_phy_firmware_version_tnx; 165 break; 166 default: 167 break; 168 } 169 init_phy_ops_out: 170 return ret_val; 171 } 172 173 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 174 { 175 s32 ret_val = IXGBE_SUCCESS; 176 u16 list_offset, data_offset, data_value; 177 178 DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); 179 180 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 181 ixgbe_init_mac_link_ops_82599(hw); 182 183 hw->phy.ops.reset = NULL; 184 185 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 186 &data_offset); 187 if (ret_val != IXGBE_SUCCESS) 188 goto setup_sfp_out; 189 190 /* PHY config will finish before releasing the semaphore */ 191 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 192 IXGBE_GSSR_MAC_CSR_SM); 193 if (ret_val != IXGBE_SUCCESS) { 194 ret_val = IXGBE_ERR_SWFW_SYNC; 195 goto setup_sfp_out; 196 } 197 198 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) 199 goto setup_sfp_err; 200 while (data_value != 0xffff) { 201 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); 202 IXGBE_WRITE_FLUSH(hw); 203 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) 204 goto setup_sfp_err; 205 } 206 207 /* Release the semaphore */ 208 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 209 /* Delay obtaining semaphore again to allow FW access 210 * prot_autoc_write uses the semaphore too. 211 */ 212 msec_delay(hw->eeprom.semaphore_delay); 213 214 /* Restart DSP and set SFI mode */ 215 ret_val = hw->mac.ops.prot_autoc_write(hw, 216 hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, 217 false); 218 219 if (ret_val) { 220 DEBUGOUT("sfp module setup not complete\n"); 221 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 222 goto setup_sfp_out; 223 } 224 225 } 226 227 setup_sfp_out: 228 return ret_val; 229 230 setup_sfp_err: 231 /* Release the semaphore */ 232 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 233 /* Delay obtaining semaphore again to allow FW access */ 234 msec_delay(hw->eeprom.semaphore_delay); 235 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 236 "eeprom read at offset %d failed", data_offset); 237 return IXGBE_ERR_PHY; 238 } 239 240 /** 241 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read 242 * @hw: pointer to hardware structure 243 * @locked: Return the if we locked for this read. 244 * @reg_val: Value we read from AUTOC 245 * 246 * For this part (82599) we need to wrap read-modify-writes with a possible 247 * FW/SW lock. It is assumed this lock will be freed with the next 248 * prot_autoc_write_82599(). 249 */ 250 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) 251 { 252 s32 ret_val; 253 254 *locked = false; 255 /* If LESM is on then we need to hold the SW/FW semaphore. */ 256 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { 257 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 258 IXGBE_GSSR_MAC_CSR_SM); 259 if (ret_val != IXGBE_SUCCESS) 260 return IXGBE_ERR_SWFW_SYNC; 261 262 *locked = true; 263 } 264 265 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 266 return IXGBE_SUCCESS; 267 } 268 269 /** 270 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write 271 * @hw: pointer to hardware structure 272 * @autoc: value to write to AUTOC 273 * @locked: bool to indicate whether the SW/FW lock was already taken by 274 * previous proc_autoc_read_82599. 275 * 276 * This part (82599) may need to hold the SW/FW lock around all writes to 277 * AUTOC. Likewise after a write we need to do a pipeline reset. 278 */ 279 s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) 280 { 281 s32 ret_val = IXGBE_SUCCESS; 282 283 /* Blocked by MNG FW so bail */ 284 if (ixgbe_check_reset_blocked(hw)) 285 goto out; 286 287 /* We only need to get the lock if: 288 * - We didn't do it already (in the read part of a read-modify-write) 289 * - LESM is enabled. 290 */ 291 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { 292 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 293 IXGBE_GSSR_MAC_CSR_SM); 294 if (ret_val != IXGBE_SUCCESS) 295 return IXGBE_ERR_SWFW_SYNC; 296 297 locked = true; 298 } 299 300 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 301 ret_val = ixgbe_reset_pipeline_82599(hw); 302 303 out: 304 /* Free the SW/FW semaphore as we either grabbed it here or 305 * already had it when this function was called. 306 */ 307 if (locked) 308 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 309 310 return ret_val; 311 } 312 313 /** 314 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type 315 * @hw: pointer to hardware structure 316 * 317 * Initialize the function pointers and assign the MAC type for 82599. 318 * Does not touch the hardware. 319 **/ 320 321 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) 322 { 323 struct ixgbe_mac_info *mac = &hw->mac; 324 struct ixgbe_phy_info *phy = &hw->phy; 325 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 326 s32 ret_val; 327 328 DEBUGFUNC("ixgbe_init_ops_82599"); 329 330 ixgbe_init_phy_ops_generic(hw); 331 ret_val = ixgbe_init_ops_generic(hw); 332 333 /* PHY */ 334 phy->ops.identify = ixgbe_identify_phy_82599; 335 phy->ops.init = ixgbe_init_phy_ops_82599; 336 337 /* MAC */ 338 mac->ops.reset_hw = ixgbe_reset_hw_82599; 339 mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; 340 mac->ops.get_media_type = ixgbe_get_media_type_82599; 341 mac->ops.get_supported_physical_layer = 342 ixgbe_get_supported_physical_layer_82599; 343 mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; 344 mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; 345 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; 346 mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; 347 mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; 348 mac->ops.start_hw = ixgbe_start_hw_82599; 349 mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; 350 mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; 351 mac->ops.get_device_caps = ixgbe_get_device_caps_generic; 352 mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; 353 mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; 354 mac->ops.prot_autoc_read = prot_autoc_read_82599; 355 mac->ops.prot_autoc_write = prot_autoc_write_82599; 356 357 /* RAR, Multicast, VLAN */ 358 mac->ops.set_vmdq = ixgbe_set_vmdq_generic; 359 mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; 360 mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; 361 mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; 362 mac->rar_highwater = 1; 363 mac->ops.set_vfta = ixgbe_set_vfta_generic; 364 mac->ops.set_vlvf = ixgbe_set_vlvf_generic; 365 mac->ops.clear_vfta = ixgbe_clear_vfta_generic; 366 mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; 367 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; 368 mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; 369 mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; 370 371 /* Link */ 372 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; 373 mac->ops.check_link = ixgbe_check_mac_link_generic; 374 mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; 375 ixgbe_init_mac_link_ops_82599(hw); 376 377 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; 378 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; 379 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; 380 mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; 381 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; 382 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; 383 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 384 385 mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) 386 & IXGBE_FWSM_MODE_MASK); 387 388 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; 389 390 /* EEPROM */ 391 eeprom->ops.read = ixgbe_read_eeprom_82599; 392 eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; 393 394 /* Manageability interface */ 395 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; 396 397 mac->ops.get_thermal_sensor_data = 398 ixgbe_get_thermal_sensor_data_generic; 399 mac->ops.init_thermal_sensor_thresh = 400 ixgbe_init_thermal_sensor_thresh_generic; 401 402 mac->ops.bypass_rw = ixgbe_bypass_rw_generic; 403 mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic; 404 mac->ops.bypass_set = ixgbe_bypass_set_generic; 405 mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic; 406 407 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; 408 409 return ret_val; 410 } 411 412 /** 413 * ixgbe_get_link_capabilities_82599 - Determines link capabilities 414 * @hw: pointer to hardware structure 415 * @speed: pointer to link speed 416 * @autoneg: true when autoneg or autotry is enabled 417 * 418 * Determines the link capabilities by reading the AUTOC register. 419 **/ 420 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 421 ixgbe_link_speed *speed, 422 bool *autoneg) 423 { 424 s32 status = IXGBE_SUCCESS; 425 u32 autoc = 0; 426 427 DEBUGFUNC("ixgbe_get_link_capabilities_82599"); 428 429 430 /* Check if 1G SFP module. */ 431 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 432 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 433 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || 434 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || 435 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || 436 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { 437 *speed = IXGBE_LINK_SPEED_1GB_FULL; 438 *autoneg = true; 439 goto out; 440 } 441 442 /* 443 * Determine link capabilities based on the stored value of AUTOC, 444 * which represents EEPROM defaults. If AUTOC value has not 445 * been stored, use the current register values. 446 */ 447 if (hw->mac.orig_link_settings_stored) 448 autoc = hw->mac.orig_autoc; 449 else 450 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 451 452 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 453 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 454 *speed = IXGBE_LINK_SPEED_1GB_FULL; 455 *autoneg = false; 456 break; 457 458 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 459 *speed = IXGBE_LINK_SPEED_10GB_FULL; 460 *autoneg = false; 461 break; 462 463 case IXGBE_AUTOC_LMS_1G_AN: 464 *speed = IXGBE_LINK_SPEED_1GB_FULL; 465 *autoneg = true; 466 break; 467 468 case IXGBE_AUTOC_LMS_10G_SERIAL: 469 *speed = IXGBE_LINK_SPEED_10GB_FULL; 470 *autoneg = false; 471 break; 472 473 case IXGBE_AUTOC_LMS_KX4_KX_KR: 474 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 475 *speed = IXGBE_LINK_SPEED_UNKNOWN; 476 if (autoc & IXGBE_AUTOC_KR_SUPP) 477 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 478 if (autoc & IXGBE_AUTOC_KX4_SUPP) 479 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 480 if (autoc & IXGBE_AUTOC_KX_SUPP) 481 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 482 *autoneg = true; 483 break; 484 485 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: 486 *speed = IXGBE_LINK_SPEED_100_FULL; 487 if (autoc & IXGBE_AUTOC_KR_SUPP) 488 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 489 if (autoc & IXGBE_AUTOC_KX4_SUPP) 490 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 491 if (autoc & IXGBE_AUTOC_KX_SUPP) 492 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 493 *autoneg = true; 494 break; 495 496 case IXGBE_AUTOC_LMS_SGMII_1G_100M: 497 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; 498 *autoneg = false; 499 break; 500 501 default: 502 status = IXGBE_ERR_LINK_SETUP; 503 goto out; 504 break; 505 } 506 507 if (hw->phy.multispeed_fiber) { 508 *speed |= IXGBE_LINK_SPEED_10GB_FULL | 509 IXGBE_LINK_SPEED_1GB_FULL; 510 511 /* QSFP must not enable full auto-negotiation 512 * Limited autoneg is enabled at 1G 513 */ 514 if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) 515 *autoneg = false; 516 else 517 *autoneg = true; 518 } 519 520 out: 521 return status; 522 } 523 524 /** 525 * ixgbe_get_media_type_82599 - Get media type 526 * @hw: pointer to hardware structure 527 * 528 * Returns the media type (fiber, copper, backplane) 529 **/ 530 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 531 { 532 enum ixgbe_media_type media_type; 533 534 DEBUGFUNC("ixgbe_get_media_type_82599"); 535 536 /* Detect if there is a copper PHY attached. */ 537 switch (hw->phy.type) { 538 case ixgbe_phy_cu_unknown: 539 case ixgbe_phy_tn: 540 media_type = ixgbe_media_type_copper; 541 goto out; 542 default: 543 break; 544 } 545 546 switch (hw->device_id) { 547 case IXGBE_DEV_ID_82599_KX4: 548 case IXGBE_DEV_ID_82599_KX4_MEZZ: 549 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 550 case IXGBE_DEV_ID_82599_KR: 551 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: 552 case IXGBE_DEV_ID_82599_XAUI_LOM: 553 /* Default device ID is mezzanine card KX/KX4 */ 554 media_type = ixgbe_media_type_backplane; 555 break; 556 case IXGBE_DEV_ID_82599_SFP: 557 case IXGBE_DEV_ID_82599_SFP_FCOE: 558 case IXGBE_DEV_ID_82599_SFP_EM: 559 case IXGBE_DEV_ID_82599_SFP_SF2: 560 case IXGBE_DEV_ID_82599_SFP_SF_QP: 561 case IXGBE_DEV_ID_82599EN_SFP: 562 media_type = ixgbe_media_type_fiber; 563 break; 564 case IXGBE_DEV_ID_82599_CX4: 565 media_type = ixgbe_media_type_cx4; 566 break; 567 case IXGBE_DEV_ID_82599_T3_LOM: 568 media_type = ixgbe_media_type_copper; 569 break; 570 case IXGBE_DEV_ID_82599_LS: 571 media_type = ixgbe_media_type_fiber_lco; 572 break; 573 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 574 media_type = ixgbe_media_type_fiber_qsfp; 575 break; 576 case IXGBE_DEV_ID_82599_BYPASS: 577 media_type = ixgbe_media_type_fiber_fixed; 578 hw->phy.multispeed_fiber = true; 579 break; 580 default: 581 media_type = ixgbe_media_type_unknown; 582 break; 583 } 584 out: 585 return media_type; 586 } 587 588 /** 589 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 590 * @hw: pointer to hardware structure 591 * 592 * Disables link during D3 power down sequence. 593 * 594 **/ 595 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) 596 { 597 u32 autoc2_reg; 598 u16 ee_ctrl_2 = 0; 599 600 DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); 601 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); 602 603 if (!ixgbe_mng_present(hw) && !hw->wol_enabled && 604 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { 605 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 606 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; 607 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); 608 } 609 } 610 611 /** 612 * ixgbe_start_mac_link_82599 - Setup MAC link settings 613 * @hw: pointer to hardware structure 614 * @autoneg_wait_to_complete: true when waiting for completion is needed 615 * 616 * Configures link settings based on values in the ixgbe_hw struct. 617 * Restarts the link. Performs autonegotiation if needed. 618 **/ 619 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 620 bool autoneg_wait_to_complete) 621 { 622 u32 autoc_reg; 623 u32 links_reg; 624 u32 i; 625 s32 status = IXGBE_SUCCESS; 626 bool got_lock = false; 627 628 DEBUGFUNC("ixgbe_start_mac_link_82599"); 629 630 631 /* reset_pipeline requires us to hold this lock as it writes to 632 * AUTOC. 633 */ 634 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { 635 status = hw->mac.ops.acquire_swfw_sync(hw, 636 IXGBE_GSSR_MAC_CSR_SM); 637 if (status != IXGBE_SUCCESS) 638 goto out; 639 640 got_lock = true; 641 } 642 643 /* Restart link */ 644 ixgbe_reset_pipeline_82599(hw); 645 646 if (got_lock) 647 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 648 649 /* Only poll for autoneg to complete if specified to do so */ 650 if (autoneg_wait_to_complete) { 651 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 652 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 653 IXGBE_AUTOC_LMS_KX4_KX_KR || 654 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 655 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 656 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 657 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 658 links_reg = 0; /* Just in case Autoneg time = 0 */ 659 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 660 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 661 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 662 break; 663 msec_delay(100); 664 } 665 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 666 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 667 DEBUGOUT("Autoneg did not complete.\n"); 668 } 669 } 670 } 671 672 /* Add delay to filter out noises during initial link setup */ 673 msec_delay(50); 674 675 out: 676 return status; 677 } 678 679 /** 680 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 681 * @hw: pointer to hardware structure 682 * 683 * The base drivers may require better control over SFP+ module 684 * PHY states. This includes selectively shutting down the Tx 685 * laser on the PHY, effectively halting physical link. 686 **/ 687 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 688 { 689 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 690 691 /* Blocked by MNG FW so bail */ 692 if (ixgbe_check_reset_blocked(hw)) 693 return; 694 695 /* Disable Tx laser; allow 100us to go dark per spec */ 696 esdp_reg |= IXGBE_ESDP_SDP3; 697 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 698 IXGBE_WRITE_FLUSH(hw); 699 usec_delay(100); 700 } 701 702 /** 703 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser 704 * @hw: pointer to hardware structure 705 * 706 * The base drivers may require better control over SFP+ module 707 * PHY states. This includes selectively turning on the Tx 708 * laser on the PHY, effectively starting physical link. 709 **/ 710 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 711 { 712 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 713 714 /* Enable Tx laser; allow 100ms to light up */ 715 esdp_reg &= ~IXGBE_ESDP_SDP3; 716 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 717 IXGBE_WRITE_FLUSH(hw); 718 msec_delay(100); 719 } 720 721 /** 722 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 723 * @hw: pointer to hardware structure 724 * 725 * When the driver changes the link speeds that it can support, 726 * it sets autotry_restart to true to indicate that we need to 727 * initiate a new autotry session with the link partner. To do 728 * so, we set the speed then disable and re-enable the Tx laser, to 729 * alert the link partner that it also needs to restart autotry on its 730 * end. This is consistent with true clause 37 autoneg, which also 731 * involves a loss of signal. 732 **/ 733 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 734 { 735 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); 736 737 /* Blocked by MNG FW so bail */ 738 if (ixgbe_check_reset_blocked(hw)) 739 return; 740 741 if (hw->mac.autotry_restart) { 742 ixgbe_disable_tx_laser_multispeed_fiber(hw); 743 ixgbe_enable_tx_laser_multispeed_fiber(hw); 744 hw->mac.autotry_restart = false; 745 } 746 } 747 748 /** 749 * ixgbe_set_hard_rate_select_speed - Set module link speed 750 * @hw: pointer to hardware structure 751 * @speed: link speed to set 752 * 753 * Set module link speed via RS0/RS1 rate select pins. 754 */ 755 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, 756 ixgbe_link_speed speed) 757 { 758 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 759 760 switch (speed) { 761 case IXGBE_LINK_SPEED_10GB_FULL: 762 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); 763 break; 764 case IXGBE_LINK_SPEED_1GB_FULL: 765 esdp_reg &= ~IXGBE_ESDP_SDP5; 766 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 767 break; 768 default: 769 DEBUGOUT("Invalid fixed module speed\n"); 770 return; 771 } 772 773 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 774 IXGBE_WRITE_FLUSH(hw); 775 } 776 777 /** 778 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed 779 * @hw: pointer to hardware structure 780 * @speed: new link speed 781 * @autoneg_wait_to_complete: true when waiting for completion is needed 782 * 783 * Implements the Intel SmartSpeed algorithm. 784 **/ 785 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 786 ixgbe_link_speed speed, 787 bool autoneg_wait_to_complete) 788 { 789 s32 status = IXGBE_SUCCESS; 790 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 791 s32 i, j; 792 bool link_up = false; 793 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 794 795 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); 796 797 /* Set autoneg_advertised value based on input link speed */ 798 hw->phy.autoneg_advertised = 0; 799 800 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 801 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 802 803 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 804 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 805 806 if (speed & IXGBE_LINK_SPEED_100_FULL) 807 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 808 809 /* 810 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the 811 * autoneg advertisement if link is unable to be established at the 812 * highest negotiated rate. This can sometimes happen due to integrity 813 * issues with the physical media connection. 814 */ 815 816 /* First, try to get link with full advertisement */ 817 hw->phy.smart_speed_active = false; 818 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { 819 status = ixgbe_setup_mac_link_82599(hw, speed, 820 autoneg_wait_to_complete); 821 if (status != IXGBE_SUCCESS) 822 goto out; 823 824 /* 825 * Wait for the controller to acquire link. Per IEEE 802.3ap, 826 * Section 73.10.2, we may have to wait up to 500ms if KR is 827 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per 828 * Table 9 in the AN MAS. 829 */ 830 for (i = 0; i < 5; i++) { 831 msec_delay(100); 832 833 /* If we have link, just jump out */ 834 status = ixgbe_check_link(hw, &link_speed, &link_up, 835 false); 836 if (status != IXGBE_SUCCESS) 837 goto out; 838 839 if (link_up) 840 goto out; 841 } 842 } 843 844 /* 845 * We didn't get link. If we advertised KR plus one of KX4/KX 846 * (or BX4/BX), then disable KR and try again. 847 */ 848 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || 849 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) 850 goto out; 851 852 /* Turn SmartSpeed on to disable KR support */ 853 hw->phy.smart_speed_active = true; 854 status = ixgbe_setup_mac_link_82599(hw, speed, 855 autoneg_wait_to_complete); 856 if (status != IXGBE_SUCCESS) 857 goto out; 858 859 /* 860 * Wait for the controller to acquire link. 600ms will allow for 861 * the AN link_fail_inhibit_timer as well for multiple cycles of 862 * parallel detect, both 10g and 1g. This allows for the maximum 863 * connect attempts as defined in the AN MAS table 73-7. 864 */ 865 for (i = 0; i < 6; i++) { 866 msec_delay(100); 867 868 /* If we have link, just jump out */ 869 status = ixgbe_check_link(hw, &link_speed, &link_up, false); 870 if (status != IXGBE_SUCCESS) 871 goto out; 872 873 if (link_up) 874 goto out; 875 } 876 877 /* We didn't get link. Turn SmartSpeed back off. */ 878 hw->phy.smart_speed_active = false; 879 status = ixgbe_setup_mac_link_82599(hw, speed, 880 autoneg_wait_to_complete); 881 882 out: 883 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 884 DEBUGOUT("Smartspeed has downgraded the link speed " 885 "from the maximum advertised\n"); 886 return status; 887 } 888 889 /** 890 * ixgbe_setup_mac_link_82599 - Set MAC link speed 891 * @hw: pointer to hardware structure 892 * @speed: new link speed 893 * @autoneg_wait_to_complete: true when waiting for completion is needed 894 * 895 * Set the link speed in the AUTOC register and restarts link. 896 **/ 897 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 898 ixgbe_link_speed speed, 899 bool autoneg_wait_to_complete) 900 { 901 bool autoneg = false; 902 s32 status = IXGBE_SUCCESS; 903 u32 pma_pmd_1g, link_mode; 904 u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ 905 u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ 906 u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ 907 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 908 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 909 u32 links_reg; 910 u32 i; 911 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 912 913 DEBUGFUNC("ixgbe_setup_mac_link_82599"); 914 915 /* Check to see if speed passed in is supported. */ 916 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); 917 if (status) 918 goto out; 919 920 speed &= link_capabilities; 921 922 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 923 status = IXGBE_ERR_LINK_SETUP; 924 goto out; 925 } 926 927 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 928 if (hw->mac.orig_link_settings_stored) 929 orig_autoc = hw->mac.orig_autoc; 930 else 931 orig_autoc = autoc; 932 933 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 934 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 935 936 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 937 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 938 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 939 /* Set KX4/KX/KR support according to speed requested */ 940 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 941 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { 942 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) 943 autoc |= IXGBE_AUTOC_KX4_SUPP; 944 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && 945 (hw->phy.smart_speed_active == false)) 946 autoc |= IXGBE_AUTOC_KR_SUPP; 947 } 948 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 949 autoc |= IXGBE_AUTOC_KX_SUPP; 950 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 951 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 952 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 953 /* Switch from 1G SFI to 10G SFI if requested */ 954 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 955 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { 956 autoc &= ~IXGBE_AUTOC_LMS_MASK; 957 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 958 } 959 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 960 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 961 /* Switch from 10G SFI to 1G SFI if requested */ 962 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 963 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { 964 autoc &= ~IXGBE_AUTOC_LMS_MASK; 965 if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) 966 autoc |= IXGBE_AUTOC_LMS_1G_AN; 967 else 968 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 969 } 970 } 971 972 if (autoc != current_autoc) { 973 /* Restart link */ 974 status = hw->mac.ops.prot_autoc_write(hw, autoc, false); 975 if (status != IXGBE_SUCCESS) 976 goto out; 977 978 /* Only poll for autoneg to complete if specified to do so */ 979 if (autoneg_wait_to_complete) { 980 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 981 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 982 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 983 links_reg = 0; /*Just in case Autoneg time=0*/ 984 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 985 links_reg = 986 IXGBE_READ_REG(hw, IXGBE_LINKS); 987 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 988 break; 989 msec_delay(100); 990 } 991 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 992 status = 993 IXGBE_ERR_AUTONEG_NOT_COMPLETE; 994 DEBUGOUT("Autoneg did not complete.\n"); 995 } 996 } 997 } 998 999 /* Add delay to filter out noises during initial link setup */ 1000 msec_delay(50); 1001 } 1002 1003 out: 1004 return status; 1005 } 1006 1007 /** 1008 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field 1009 * @hw: pointer to hardware structure 1010 * @speed: new link speed 1011 * @autoneg_wait_to_complete: true if waiting is needed to complete 1012 * 1013 * Restarts link on PHY and MAC based on settings passed in. 1014 **/ 1015 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 1016 ixgbe_link_speed speed, 1017 bool autoneg_wait_to_complete) 1018 { 1019 s32 status; 1020 1021 DEBUGFUNC("ixgbe_setup_copper_link_82599"); 1022 1023 /* Setup the PHY according to input speed */ 1024 status = hw->phy.ops.setup_link_speed(hw, speed, 1025 autoneg_wait_to_complete); 1026 /* Set up MAC */ 1027 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); 1028 1029 return status; 1030 } 1031 1032 /** 1033 * ixgbe_reset_hw_82599 - Perform hardware reset 1034 * @hw: pointer to hardware structure 1035 * 1036 * Resets the hardware by resetting the transmit and receive units, masks 1037 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 1038 * reset. 1039 **/ 1040 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 1041 { 1042 ixgbe_link_speed link_speed; 1043 s32 status; 1044 u32 ctrl = 0; 1045 u32 i, autoc, autoc2; 1046 u32 curr_lms; 1047 bool link_up = false; 1048 1049 DEBUGFUNC("ixgbe_reset_hw_82599"); 1050 1051 /* Call adapter stop to disable tx/rx and clear interrupts */ 1052 status = hw->mac.ops.stop_adapter(hw); 1053 if (status != IXGBE_SUCCESS) 1054 goto reset_hw_out; 1055 1056 /* flush pending Tx transactions */ 1057 ixgbe_clear_tx_pending(hw); 1058 1059 /* PHY ops must be identified and initialized prior to reset */ 1060 1061 /* Identify PHY and related function pointers */ 1062 status = hw->phy.ops.init(hw); 1063 1064 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 1065 goto reset_hw_out; 1066 1067 /* Setup SFP module if there is one present. */ 1068 if (hw->phy.sfp_setup_needed) { 1069 status = hw->mac.ops.setup_sfp(hw); 1070 hw->phy.sfp_setup_needed = false; 1071 } 1072 1073 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 1074 goto reset_hw_out; 1075 1076 /* Reset PHY */ 1077 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 1078 hw->phy.ops.reset(hw); 1079 1080 /* remember AUTOC from before we reset */ 1081 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; 1082 1083 mac_reset_top: 1084 /* 1085 * Issue global reset to the MAC. Needs to be SW reset if link is up. 1086 * If link reset is used when link is up, it might reset the PHY when 1087 * mng is using it. If link is down or the flag to force full link 1088 * reset is set, then perform link reset. 1089 */ 1090 ctrl = IXGBE_CTRL_LNK_RST; 1091 if (!hw->force_full_reset) { 1092 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 1093 if (link_up) 1094 ctrl = IXGBE_CTRL_RST; 1095 } 1096 1097 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); 1098 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 1099 IXGBE_WRITE_FLUSH(hw); 1100 1101 /* Poll for reset bit to self-clear meaning reset is complete */ 1102 for (i = 0; i < 10; i++) { 1103 usec_delay(1); 1104 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 1105 if (!(ctrl & IXGBE_CTRL_RST_MASK)) 1106 break; 1107 } 1108 1109 if (ctrl & IXGBE_CTRL_RST_MASK) { 1110 status = IXGBE_ERR_RESET_FAILED; 1111 DEBUGOUT("Reset polling failed to complete.\n"); 1112 } 1113 1114 msec_delay(50); 1115 1116 /* 1117 * Double resets are required for recovery from certain error 1118 * conditions. Between resets, it is necessary to stall to 1119 * allow time for any pending HW events to complete. 1120 */ 1121 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { 1122 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 1123 goto mac_reset_top; 1124 } 1125 1126 /* 1127 * Store the original AUTOC/AUTOC2 values if they have not been 1128 * stored off yet. Otherwise restore the stored original 1129 * values since the reset operation sets back to defaults. 1130 */ 1131 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1132 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 1133 1134 /* Enable link if disabled in NVM */ 1135 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { 1136 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; 1137 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 1138 IXGBE_WRITE_FLUSH(hw); 1139 } 1140 1141 if (hw->mac.orig_link_settings_stored == false) { 1142 hw->mac.orig_autoc = autoc; 1143 hw->mac.orig_autoc2 = autoc2; 1144 hw->mac.orig_link_settings_stored = true; 1145 } else { 1146 1147 /* If MNG FW is running on a multi-speed device that 1148 * doesn't autoneg with out driver support we need to 1149 * leave LMS in the state it was before we MAC reset. 1150 * Likewise if we support WoL we don't want change the 1151 * LMS state. 1152 */ 1153 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || 1154 hw->wol_enabled) 1155 hw->mac.orig_autoc = 1156 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | 1157 curr_lms; 1158 1159 if (autoc != hw->mac.orig_autoc) { 1160 status = hw->mac.ops.prot_autoc_write(hw, 1161 hw->mac.orig_autoc, 1162 false); 1163 if (status != IXGBE_SUCCESS) 1164 goto reset_hw_out; 1165 } 1166 1167 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1168 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 1169 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; 1170 autoc2 |= (hw->mac.orig_autoc2 & 1171 IXGBE_AUTOC2_UPPER_MASK); 1172 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 1173 } 1174 } 1175 1176 /* Store the permanent mac address */ 1177 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 1178 1179 /* 1180 * Store MAC address from RAR0, clear receive address registers, and 1181 * clear the multicast table. Also reset num_rar_entries to 128, 1182 * since we modify this value when programming the SAN MAC address. 1183 */ 1184 hw->mac.num_rar_entries = 128; 1185 hw->mac.ops.init_rx_addrs(hw); 1186 1187 /* Store the permanent SAN mac address */ 1188 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 1189 1190 /* Add the SAN MAC address to the RAR only if it's a valid address */ 1191 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { 1192 /* Save the SAN MAC RAR index */ 1193 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; 1194 1195 hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, 1196 hw->mac.san_addr, 0, IXGBE_RAH_AV); 1197 1198 /* clear VMDq pool/queue selection for this RAR */ 1199 hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, 1200 IXGBE_CLEAR_VMDQ_ALL); 1201 1202 /* Reserve the last RAR for the SAN MAC address */ 1203 hw->mac.num_rar_entries--; 1204 } 1205 1206 /* Store the alternative WWNN/WWPN prefix */ 1207 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 1208 &hw->mac.wwpn_prefix); 1209 1210 reset_hw_out: 1211 return status; 1212 } 1213 1214 /** 1215 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete 1216 * @hw: pointer to hardware structure 1217 * @fdircmd: current value of FDIRCMD register 1218 */ 1219 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) 1220 { 1221 int i; 1222 1223 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { 1224 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); 1225 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) 1226 return IXGBE_SUCCESS; 1227 usec_delay(10); 1228 } 1229 1230 return IXGBE_ERR_FDIR_CMD_INCOMPLETE; 1231 } 1232 1233 /** 1234 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. 1235 * @hw: pointer to hardware structure 1236 **/ 1237 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) 1238 { 1239 s32 err; 1240 int i; 1241 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); 1242 u32 fdircmd; 1243 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; 1244 1245 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); 1246 1247 /* 1248 * Before starting reinitialization process, 1249 * FDIRCMD.CMD must be zero. 1250 */ 1251 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); 1252 if (err) { 1253 DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); 1254 return err; 1255 } 1256 1257 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); 1258 IXGBE_WRITE_FLUSH(hw); 1259 /* 1260 * 82599 adapters flow director init flow cannot be restarted, 1261 * Workaround 82599 silicon errata by performing the following steps 1262 * before re-writing the FDIRCTRL control register with the same value. 1263 * - write 1 to bit 8 of FDIRCMD register & 1264 * - write 0 to bit 8 of FDIRCMD register 1265 */ 1266 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1267 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1268 IXGBE_FDIRCMD_CLEARHT)); 1269 IXGBE_WRITE_FLUSH(hw); 1270 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1271 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1272 ~IXGBE_FDIRCMD_CLEARHT)); 1273 IXGBE_WRITE_FLUSH(hw); 1274 /* 1275 * Clear FDIR Hash register to clear any leftover hashes 1276 * waiting to be programmed. 1277 */ 1278 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); 1279 IXGBE_WRITE_FLUSH(hw); 1280 1281 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1282 IXGBE_WRITE_FLUSH(hw); 1283 1284 /* Poll init-done after we write FDIRCTRL register */ 1285 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1286 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1287 IXGBE_FDIRCTRL_INIT_DONE) 1288 break; 1289 msec_delay(1); 1290 } 1291 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1292 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1293 return IXGBE_ERR_FDIR_REINIT_FAILED; 1294 } 1295 1296 /* Clear FDIR statistics registers (read to clear) */ 1297 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); 1298 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); 1299 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 1300 IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 1301 IXGBE_READ_REG(hw, IXGBE_FDIRLEN); 1302 1303 return IXGBE_SUCCESS; 1304 } 1305 1306 /** 1307 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers 1308 * @hw: pointer to hardware structure 1309 * @fdirctrl: value to write to flow director control register 1310 **/ 1311 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1312 { 1313 int i; 1314 1315 DEBUGFUNC("ixgbe_fdir_enable_82599"); 1316 1317 /* Prime the keys for hashing */ 1318 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1319 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1320 1321 /* 1322 * Poll init-done after we write the register. Estimated times: 1323 * 10G: PBALLOC = 11b, timing is 60us 1324 * 1G: PBALLOC = 11b, timing is 600us 1325 * 100M: PBALLOC = 11b, timing is 6ms 1326 * 1327 * Multiple these timings by 4 if under full Rx load 1328 * 1329 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1330 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1331 * this might not finish in our poll time, but we can live with that 1332 * for now. 1333 */ 1334 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1335 IXGBE_WRITE_FLUSH(hw); 1336 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1337 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1338 IXGBE_FDIRCTRL_INIT_DONE) 1339 break; 1340 msec_delay(1); 1341 } 1342 1343 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1344 DEBUGOUT("Flow Director poll time exceeded!\n"); 1345 } 1346 1347 /** 1348 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1349 * @hw: pointer to hardware structure 1350 * @fdirctrl: value to write to flow director control register, initially 1351 * contains just the value of the Rx packet buffer allocation 1352 **/ 1353 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1354 { 1355 DEBUGFUNC("ixgbe_init_fdir_signature_82599"); 1356 1357 /* 1358 * Continue setup of fdirctrl register bits: 1359 * Move the flexible bytes to use the ethertype - shift 6 words 1360 * Set the maximum length per hash bucket to 0xA filters 1361 * Send interrupt when 64 filters are left 1362 */ 1363 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | 1364 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | 1365 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); 1366 1367 /* write hashes and fdirctrl register, poll for completion */ 1368 ixgbe_fdir_enable_82599(hw, fdirctrl); 1369 1370 return IXGBE_SUCCESS; 1371 } 1372 1373 /** 1374 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1375 * @hw: pointer to hardware structure 1376 * @fdirctrl: value to write to flow director control register, initially 1377 * contains just the value of the Rx packet buffer allocation 1378 * @cloud_mode: true - cloud mode, false - other mode 1379 **/ 1380 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, 1381 bool cloud_mode) 1382 { 1383 UNREFERENCED_1PARAMETER(cloud_mode); 1384 DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); 1385 1386 /* 1387 * Continue setup of fdirctrl register bits: 1388 * Turn perfect match filtering on 1389 * Report hash in RSS field of Rx wb descriptor 1390 * Initialize the drop queue to queue 127 1391 * Move the flexible bytes to use the ethertype - shift 6 words 1392 * Set the maximum length per hash bucket to 0xA filters 1393 * Send interrupt when 64 (0x4 * 16) filters are left 1394 */ 1395 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | 1396 IXGBE_FDIRCTRL_REPORT_STATUS | 1397 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | 1398 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | 1399 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | 1400 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); 1401 1402 if (cloud_mode) 1403 fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << 1404 IXGBE_FDIRCTRL_FILTERMODE_SHIFT); 1405 1406 /* write hashes and fdirctrl register, poll for completion */ 1407 ixgbe_fdir_enable_82599(hw, fdirctrl); 1408 1409 return IXGBE_SUCCESS; 1410 } 1411 1412 /** 1413 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue 1414 * @hw: pointer to hardware structure 1415 * @dropqueue: Rx queue index used for the dropped packets 1416 **/ 1417 void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) 1418 { 1419 u32 fdirctrl; 1420 1421 DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); 1422 /* Clear init done bit and drop queue field */ 1423 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); 1424 fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); 1425 1426 /* Set drop queue */ 1427 fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); 1428 if ((hw->mac.type == ixgbe_mac_X550) || 1429 (hw->mac.type == ixgbe_mac_X550EM_x) || 1430 (hw->mac.type == ixgbe_mac_X550EM_a)) 1431 fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; 1432 1433 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1434 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1435 IXGBE_FDIRCMD_CLEARHT)); 1436 IXGBE_WRITE_FLUSH(hw); 1437 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1438 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1439 ~IXGBE_FDIRCMD_CLEARHT)); 1440 IXGBE_WRITE_FLUSH(hw); 1441 1442 /* write hashes and fdirctrl register, poll for completion */ 1443 ixgbe_fdir_enable_82599(hw, fdirctrl); 1444 } 1445 1446 /* 1447 * These defines allow us to quickly generate all of the necessary instructions 1448 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION 1449 * for values 0 through 15 1450 */ 1451 #define IXGBE_ATR_COMMON_HASH_KEY \ 1452 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) 1453 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ 1454 do { \ 1455 u32 n = (_n); \ 1456 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ 1457 common_hash ^= lo_hash_dword >> n; \ 1458 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1459 bucket_hash ^= lo_hash_dword >> n; \ 1460 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ 1461 sig_hash ^= lo_hash_dword << (16 - n); \ 1462 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ 1463 common_hash ^= hi_hash_dword >> n; \ 1464 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1465 bucket_hash ^= hi_hash_dword >> n; \ 1466 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1467 sig_hash ^= hi_hash_dword << (16 - n); \ 1468 } while (0) 1469 1470 /** 1471 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1472 * @input: input bitstream to compute the hash on 1473 * @common: compressed common input dword 1474 * 1475 * This function is almost identical to the function above but contains 1476 * several optimizations such as unwinding all of the loops, letting the 1477 * compiler work out all of the conditional ifs since the keys are static 1478 * defines, and computing two keys at once since the hashed dword stream 1479 * will be the same for both keys. 1480 **/ 1481 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, 1482 union ixgbe_atr_hash_dword common) 1483 { 1484 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1485 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; 1486 1487 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1488 flow_vm_vlan = IXGBE_NTOHL(input.dword); 1489 1490 /* generate common hash dword */ 1491 hi_hash_dword = IXGBE_NTOHL(common.dword); 1492 1493 /* low dword is word swapped version of common */ 1494 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1495 1496 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1497 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1498 1499 /* Process bits 0 and 16 */ 1500 IXGBE_COMPUTE_SIG_HASH_ITERATION(0); 1501 1502 /* 1503 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1504 * delay this because bit 0 of the stream should not be processed 1505 * so we do not add the VLAN until after bit 0 was processed 1506 */ 1507 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1508 1509 /* Process remaining 30 bit of the key */ 1510 IXGBE_COMPUTE_SIG_HASH_ITERATION(1); 1511 IXGBE_COMPUTE_SIG_HASH_ITERATION(2); 1512 IXGBE_COMPUTE_SIG_HASH_ITERATION(3); 1513 IXGBE_COMPUTE_SIG_HASH_ITERATION(4); 1514 IXGBE_COMPUTE_SIG_HASH_ITERATION(5); 1515 IXGBE_COMPUTE_SIG_HASH_ITERATION(6); 1516 IXGBE_COMPUTE_SIG_HASH_ITERATION(7); 1517 IXGBE_COMPUTE_SIG_HASH_ITERATION(8); 1518 IXGBE_COMPUTE_SIG_HASH_ITERATION(9); 1519 IXGBE_COMPUTE_SIG_HASH_ITERATION(10); 1520 IXGBE_COMPUTE_SIG_HASH_ITERATION(11); 1521 IXGBE_COMPUTE_SIG_HASH_ITERATION(12); 1522 IXGBE_COMPUTE_SIG_HASH_ITERATION(13); 1523 IXGBE_COMPUTE_SIG_HASH_ITERATION(14); 1524 IXGBE_COMPUTE_SIG_HASH_ITERATION(15); 1525 1526 /* combine common_hash result with signature and bucket hashes */ 1527 bucket_hash ^= common_hash; 1528 bucket_hash &= IXGBE_ATR_HASH_MASK; 1529 1530 sig_hash ^= common_hash << 16; 1531 sig_hash &= IXGBE_ATR_HASH_MASK << 16; 1532 1533 /* return completed signature hash */ 1534 return sig_hash ^ bucket_hash; 1535 } 1536 1537 /** 1538 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1539 * @hw: pointer to hardware structure 1540 * @input: unique input dword 1541 * @common: compressed common input dword 1542 * @queue: queue index to direct traffic to 1543 * 1544 * Note that the tunnel bit in input must not be set when the hardware 1545 * tunneling support does not exist. 1546 **/ 1547 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1548 union ixgbe_atr_hash_dword input, 1549 union ixgbe_atr_hash_dword common, 1550 u8 queue) 1551 { 1552 u64 fdirhashcmd; 1553 u8 flow_type; 1554 bool tunnel; 1555 u32 fdircmd; 1556 1557 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); 1558 1559 /* 1560 * Get the flow_type in order to program FDIRCMD properly 1561 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 1562 * fifth is FDIRCMD.TUNNEL_FILTER 1563 */ 1564 tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); 1565 flow_type = input.formatted.flow_type & 1566 (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); 1567 switch (flow_type) { 1568 case IXGBE_ATR_FLOW_TYPE_TCPV4: 1569 case IXGBE_ATR_FLOW_TYPE_UDPV4: 1570 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 1571 case IXGBE_ATR_FLOW_TYPE_TCPV6: 1572 case IXGBE_ATR_FLOW_TYPE_UDPV6: 1573 case IXGBE_ATR_FLOW_TYPE_SCTPV6: 1574 break; 1575 default: 1576 DEBUGOUT(" Error on flow type input\n"); 1577 return; 1578 } 1579 1580 /* configure FDIRCMD register */ 1581 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1582 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1583 fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1584 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1585 if (tunnel) 1586 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; 1587 1588 /* 1589 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1590 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 1591 */ 1592 fdirhashcmd = (u64)fdircmd << 32; 1593 fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common); 1594 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1595 1596 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); 1597 1598 return; 1599 } 1600 1601 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ 1602 do { \ 1603 u32 n = (_n); \ 1604 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1605 bucket_hash ^= lo_hash_dword >> n; \ 1606 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1607 bucket_hash ^= hi_hash_dword >> n; \ 1608 } while (0) 1609 1610 /** 1611 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash 1612 * @input: input bitstream to compute the hash on 1613 * @input_mask: mask for the input bitstream 1614 * 1615 * This function serves two main purposes. First it applies the input_mask 1616 * to the atr_input resulting in a cleaned up atr_input data stream. 1617 * Secondly it computes the hash and stores it in the bkt_hash field at 1618 * the end of the input byte stream. This way it will be available for 1619 * future use without needing to recompute the hash. 1620 **/ 1621 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 1622 union ixgbe_atr_input *input_mask) 1623 { 1624 1625 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1626 u32 bucket_hash = 0; 1627 u32 hi_dword = 0; 1628 u32 i = 0; 1629 1630 /* Apply masks to input data */ 1631 for (i = 0; i < 14; i++) 1632 input->dword_stream[i] &= input_mask->dword_stream[i]; 1633 1634 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1635 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); 1636 1637 /* generate common hash dword */ 1638 for (i = 1; i <= 13; i++) 1639 hi_dword ^= input->dword_stream[i]; 1640 hi_hash_dword = IXGBE_NTOHL(hi_dword); 1641 1642 /* low dword is word swapped version of common */ 1643 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1644 1645 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1646 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1647 1648 /* Process bits 0 and 16 */ 1649 IXGBE_COMPUTE_BKT_HASH_ITERATION(0); 1650 1651 /* 1652 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1653 * delay this because bit 0 of the stream should not be processed 1654 * so we do not add the VLAN until after bit 0 was processed 1655 */ 1656 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1657 1658 /* Process remaining 30 bit of the key */ 1659 for (i = 1; i <= 15; i++) 1660 IXGBE_COMPUTE_BKT_HASH_ITERATION(i); 1661 1662 /* 1663 * Limit hash to 13 bits since max bucket count is 8K. 1664 * Store result at the end of the input stream. 1665 */ 1666 input->formatted.bkt_hash = bucket_hash & 0x1FFF; 1667 } 1668 1669 /** 1670 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks 1671 * @input_mask: mask to be bit swapped 1672 * 1673 * The source and destination port masks for flow director are bit swapped 1674 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to 1675 * generate a correctly swapped value we need to bit swap the mask and that 1676 * is what is accomplished by this function. 1677 **/ 1678 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) 1679 { 1680 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); 1681 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; 1682 mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port); 1683 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); 1684 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); 1685 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); 1686 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); 1687 } 1688 1689 /* 1690 * These two macros are meant to address the fact that we have registers 1691 * that are either all or in part big-endian. As a result on big-endian 1692 * systems we will end up byte swapping the value to little-endian before 1693 * it is byte swapped again and written to the hardware in the original 1694 * big-endian format. 1695 */ 1696 #define IXGBE_STORE_AS_BE32(_value) \ 1697 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ 1698 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) 1699 1700 #define IXGBE_WRITE_REG_BE32(a, reg, value) \ 1701 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) 1702 1703 #define IXGBE_STORE_AS_BE16(_value) \ 1704 IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) 1705 1706 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, 1707 union ixgbe_atr_input *input_mask, bool cloud_mode) 1708 { 1709 /* mask IPv6 since it is currently not supported */ 1710 u32 fdirm = IXGBE_FDIRM_DIPv6; 1711 u32 fdirtcpm; 1712 u32 fdirip6m; 1713 UNREFERENCED_1PARAMETER(cloud_mode); 1714 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); 1715 1716 /* 1717 * Program the relevant mask registers. If src/dst_port or src/dst_addr 1718 * are zero, then assume a full mask for that field. Also assume that 1719 * a VLAN of 0 is unspecified, so mask that out as well. L4type 1720 * cannot be masked out in this implementation. 1721 * 1722 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1723 * point in time. 1724 */ 1725 1726 /* verify bucket hash is cleared on hash generation */ 1727 if (input_mask->formatted.bkt_hash) 1728 DEBUGOUT(" bucket hash should always be 0 in mask\n"); 1729 1730 /* Program FDIRM and verify partial masks */ 1731 switch (input_mask->formatted.vm_pool & 0x7F) { 1732 case 0x0: 1733 fdirm |= IXGBE_FDIRM_POOL; 1734 case 0x7F: 1735 break; 1736 default: 1737 DEBUGOUT(" Error on vm pool mask\n"); 1738 return IXGBE_ERR_CONFIG; 1739 } 1740 1741 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { 1742 case 0x0: 1743 fdirm |= IXGBE_FDIRM_L4P; 1744 if (input_mask->formatted.dst_port || 1745 input_mask->formatted.src_port) { 1746 DEBUGOUT(" Error on src/dst port mask\n"); 1747 return IXGBE_ERR_CONFIG; 1748 } 1749 case IXGBE_ATR_L4TYPE_MASK: 1750 break; 1751 default: 1752 DEBUGOUT(" Error on flow type mask\n"); 1753 return IXGBE_ERR_CONFIG; 1754 } 1755 1756 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { 1757 case 0x0000: 1758 /* mask VLAN ID */ 1759 fdirm |= IXGBE_FDIRM_VLANID; 1760 /* FALLTHROUGH */ 1761 case 0x0FFF: 1762 /* mask VLAN priority */ 1763 fdirm |= IXGBE_FDIRM_VLANP; 1764 break; 1765 case 0xE000: 1766 /* mask VLAN ID only */ 1767 fdirm |= IXGBE_FDIRM_VLANID; 1768 /* fall through */ 1769 case 0xEFFF: 1770 /* no VLAN fields masked */ 1771 break; 1772 default: 1773 DEBUGOUT(" Error on VLAN mask\n"); 1774 return IXGBE_ERR_CONFIG; 1775 } 1776 1777 switch (input_mask->formatted.flex_bytes & 0xFFFF) { 1778 case 0x0000: 1779 /* Mask Flex Bytes */ 1780 fdirm |= IXGBE_FDIRM_FLEX; 1781 /* fall through */ 1782 case 0xFFFF: 1783 break; 1784 default: 1785 DEBUGOUT(" Error on flexible byte mask\n"); 1786 return IXGBE_ERR_CONFIG; 1787 } 1788 1789 if (cloud_mode) { 1790 fdirm |= IXGBE_FDIRM_L3P; 1791 fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); 1792 fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; 1793 1794 switch (input_mask->formatted.inner_mac[0] & 0xFF) { 1795 case 0x00: 1796 /* Mask inner MAC, fall through */ 1797 fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; 1798 case 0xFF: 1799 break; 1800 default: 1801 DEBUGOUT(" Error on inner_mac byte mask\n"); 1802 return IXGBE_ERR_CONFIG; 1803 } 1804 1805 switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { 1806 case 0x0: 1807 /* Mask vxlan id */ 1808 fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; 1809 break; 1810 case 0x00FFFFFF: 1811 fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; 1812 break; 1813 case 0xFFFFFFFF: 1814 break; 1815 default: 1816 DEBUGOUT(" Error on TNI/VNI byte mask\n"); 1817 return IXGBE_ERR_CONFIG; 1818 } 1819 1820 switch (input_mask->formatted.tunnel_type & 0xFFFF) { 1821 case 0x0: 1822 /* Mask turnnel type, fall through */ 1823 fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; 1824 case 0xFFFF: 1825 break; 1826 default: 1827 DEBUGOUT(" Error on tunnel type byte mask\n"); 1828 return IXGBE_ERR_CONFIG; 1829 } 1830 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); 1831 1832 /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM, 1833 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow 1834 * L3/L3 packets to tunnel. 1835 */ 1836 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); 1837 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); 1838 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); 1839 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); 1840 switch (hw->mac.type) { 1841 case ixgbe_mac_X550: 1842 case ixgbe_mac_X550EM_x: 1843 case ixgbe_mac_X550EM_a: 1844 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); 1845 break; 1846 default: 1847 break; 1848 } 1849 } 1850 1851 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1852 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1853 1854 if (!cloud_mode) { 1855 /* store the TCP/UDP port masks, bit reversed from port 1856 * layout */ 1857 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); 1858 1859 /* write both the same so that UDP and TCP use the same mask */ 1860 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); 1861 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); 1862 /* also use it for SCTP */ 1863 switch (hw->mac.type) { 1864 case ixgbe_mac_X550: 1865 case ixgbe_mac_X550EM_x: 1866 case ixgbe_mac_X550EM_a: 1867 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); 1868 break; 1869 default: 1870 break; 1871 } 1872 1873 /* store source and destination IP masks (big-enian) */ 1874 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 1875 ~input_mask->formatted.src_ip[0]); 1876 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 1877 ~input_mask->formatted.dst_ip[0]); 1878 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF); 1879 } 1880 return IXGBE_SUCCESS; 1881 } 1882 1883 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, 1884 union ixgbe_atr_input *input, 1885 u16 soft_id, u8 queue, bool cloud_mode) 1886 { 1887 u32 fdirport, fdirvlan, fdirhash, fdircmd; 1888 u32 addr_low, addr_high; 1889 u32 cloud_type = 0; 1890 s32 err; 1891 UNREFERENCED_1PARAMETER(cloud_mode); 1892 1893 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); 1894 if (!cloud_mode) { 1895 /* currently IPv6 is not supported, must be programmed with 0 */ 1896 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), 1897 input->formatted.src_ip[0]); 1898 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), 1899 input->formatted.src_ip[1]); 1900 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), 1901 input->formatted.src_ip[2]); 1902 1903 /* record the source address (big-endian) */ 1904 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, 1905 input->formatted.src_ip[0]); 1906 1907 /* record the first 32 bits of the destination address 1908 * (big-endian) */ 1909 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, 1910 input->formatted.dst_ip[0]); 1911 1912 /* record source and destination port (little-endian)*/ 1913 fdirport = IXGBE_NTOHS(input->formatted.dst_port); 1914 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; 1915 fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port); 1916 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); 1917 } 1918 1919 /* record VLAN (little-endian) and flex_bytes(big-endian) */ 1920 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); 1921 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; 1922 fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id); 1923 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); 1924 1925 if (cloud_mode) { 1926 if (input->formatted.tunnel_type != 0) 1927 cloud_type = 0x80000000; 1928 1929 addr_low = ((u32)input->formatted.inner_mac[0] | 1930 ((u32)input->formatted.inner_mac[1] << 8) | 1931 ((u32)input->formatted.inner_mac[2] << 16) | 1932 ((u32)input->formatted.inner_mac[3] << 24)); 1933 addr_high = ((u32)input->formatted.inner_mac[4] | 1934 ((u32)input->formatted.inner_mac[5] << 8)); 1935 cloud_type |= addr_high; 1936 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); 1937 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); 1938 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); 1939 } 1940 1941 /* configure FDIRHASH register */ 1942 fdirhash = input->formatted.bkt_hash; 1943 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; 1944 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1945 1946 /* 1947 * flush all previous writes to make certain registers are 1948 * programmed prior to issuing the command 1949 */ 1950 IXGBE_WRITE_FLUSH(hw); 1951 1952 /* configure FDIRCMD register */ 1953 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1954 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1955 if (queue == IXGBE_FDIR_DROP_QUEUE) 1956 fdircmd |= IXGBE_FDIRCMD_DROP; 1957 if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) 1958 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; 1959 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1960 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1961 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; 1962 1963 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1964 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); 1965 if (err) { 1966 DEBUGOUT("Flow Director command did not complete!\n"); 1967 return err; 1968 } 1969 1970 return IXGBE_SUCCESS; 1971 } 1972 1973 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, 1974 union ixgbe_atr_input *input, 1975 u16 soft_id) 1976 { 1977 u32 fdirhash; 1978 u32 fdircmd; 1979 s32 err; 1980 1981 /* configure FDIRHASH register */ 1982 fdirhash = input->formatted.bkt_hash; 1983 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; 1984 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1985 1986 /* flush hash to HW */ 1987 IXGBE_WRITE_FLUSH(hw); 1988 1989 /* Query if filter is present */ 1990 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); 1991 1992 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); 1993 if (err) { 1994 DEBUGOUT("Flow Director command did not complete!\n"); 1995 return err; 1996 } 1997 1998 /* if filter exists in hardware then remove it */ 1999 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { 2000 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 2001 IXGBE_WRITE_FLUSH(hw); 2002 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 2003 IXGBE_FDIRCMD_CMD_REMOVE_FLOW); 2004 } 2005 2006 return IXGBE_SUCCESS; 2007 } 2008 2009 /** 2010 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 2011 * @hw: pointer to hardware structure 2012 * @input: input bitstream 2013 * @input_mask: mask for the input bitstream 2014 * @soft_id: software index for the filters 2015 * @queue: queue index to direct traffic to 2016 * @cloud_mode: unused 2017 * 2018 * Note that the caller to this function must lock before calling, since the 2019 * hardware writes must be protected from one another. 2020 **/ 2021 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 2022 union ixgbe_atr_input *input, 2023 union ixgbe_atr_input *input_mask, 2024 u16 soft_id, u8 queue, bool cloud_mode) 2025 { 2026 s32 err = IXGBE_ERR_CONFIG; 2027 UNREFERENCED_1PARAMETER(cloud_mode); 2028 2029 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); 2030 2031 /* 2032 * Check flow_type formatting, and bail out before we touch the hardware 2033 * if there's a configuration issue 2034 */ 2035 switch (input->formatted.flow_type) { 2036 case IXGBE_ATR_FLOW_TYPE_IPV4: 2037 case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: 2038 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; 2039 if (input->formatted.dst_port || input->formatted.src_port) { 2040 DEBUGOUT(" Error on src/dst port\n"); 2041 return IXGBE_ERR_CONFIG; 2042 } 2043 break; 2044 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 2045 case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: 2046 if (input->formatted.dst_port || input->formatted.src_port) { 2047 DEBUGOUT(" Error on src/dst port\n"); 2048 return IXGBE_ERR_CONFIG; 2049 } 2050 /* FALLTHROUGH */ 2051 case IXGBE_ATR_FLOW_TYPE_TCPV4: 2052 case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: 2053 case IXGBE_ATR_FLOW_TYPE_UDPV4: 2054 case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: 2055 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 2056 IXGBE_ATR_L4TYPE_MASK; 2057 break; 2058 default: 2059 DEBUGOUT(" Error on flow type input\n"); 2060 return err; 2061 } 2062 2063 /* program input mask into the HW */ 2064 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); 2065 if (err) 2066 return err; 2067 2068 /* apply mask and compute/store hash */ 2069 ixgbe_atr_compute_perfect_hash_82599(input, input_mask); 2070 2071 /* program filters to filter memory */ 2072 return ixgbe_fdir_write_perfect_filter_82599(hw, input, 2073 soft_id, queue, cloud_mode); 2074 } 2075 2076 /** 2077 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 2078 * @hw: pointer to hardware structure 2079 * @reg: analog register to read 2080 * @val: read value 2081 * 2082 * Performs read operation to Omer analog register specified. 2083 **/ 2084 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) 2085 { 2086 u32 core_ctl; 2087 2088 DEBUGFUNC("ixgbe_read_analog_reg8_82599"); 2089 2090 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | 2091 (reg << 8)); 2092 IXGBE_WRITE_FLUSH(hw); 2093 usec_delay(10); 2094 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); 2095 *val = (u8)core_ctl; 2096 2097 return IXGBE_SUCCESS; 2098 } 2099 2100 /** 2101 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register 2102 * @hw: pointer to hardware structure 2103 * @reg: atlas register to write 2104 * @val: value to write 2105 * 2106 * Performs write operation to Omer analog register specified. 2107 **/ 2108 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) 2109 { 2110 u32 core_ctl; 2111 2112 DEBUGFUNC("ixgbe_write_analog_reg8_82599"); 2113 2114 core_ctl = (reg << 8) | val; 2115 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); 2116 IXGBE_WRITE_FLUSH(hw); 2117 usec_delay(10); 2118 2119 return IXGBE_SUCCESS; 2120 } 2121 2122 /** 2123 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx 2124 * @hw: pointer to hardware structure 2125 * 2126 * Starts the hardware using the generic start_hw function 2127 * and the generation start_hw function. 2128 * Then performs revision-specific operations, if any. 2129 **/ 2130 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) 2131 { 2132 s32 ret_val = IXGBE_SUCCESS; 2133 2134 DEBUGFUNC("ixgbe_start_hw_82599"); 2135 2136 ret_val = ixgbe_start_hw_generic(hw); 2137 if (ret_val != IXGBE_SUCCESS) 2138 goto out; 2139 2140 ixgbe_start_hw_gen2(hw); 2141 2142 /* We need to run link autotry after the driver loads */ 2143 hw->mac.autotry_restart = true; 2144 2145 if (ret_val == IXGBE_SUCCESS) 2146 ret_val = ixgbe_verify_fw_version_82599(hw); 2147 out: 2148 return ret_val; 2149 } 2150 2151 /** 2152 * ixgbe_identify_phy_82599 - Get physical layer module 2153 * @hw: pointer to hardware structure 2154 * 2155 * Determines the physical layer module found on the current adapter. 2156 * If PHY already detected, maintains current PHY type in hw struct, 2157 * otherwise executes the PHY detection routine. 2158 **/ 2159 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 2160 { 2161 s32 status; 2162 2163 DEBUGFUNC("ixgbe_identify_phy_82599"); 2164 2165 /* Detect PHY if not unknown - returns success if already detected. */ 2166 status = ixgbe_identify_phy_generic(hw); 2167 if (status != IXGBE_SUCCESS) { 2168 /* 82599 10GBASE-T requires an external PHY */ 2169 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) 2170 return status; 2171 else 2172 status = ixgbe_identify_module_generic(hw); 2173 } 2174 2175 /* Set PHY type none if no PHY detected */ 2176 if (hw->phy.type == ixgbe_phy_unknown) { 2177 hw->phy.type = ixgbe_phy_none; 2178 return IXGBE_SUCCESS; 2179 } 2180 2181 /* Return error if SFP module has been detected but is not supported */ 2182 if (hw->phy.type == ixgbe_phy_sfp_unsupported) 2183 return IXGBE_ERR_SFP_NOT_SUPPORTED; 2184 2185 return status; 2186 } 2187 2188 /** 2189 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type 2190 * @hw: pointer to hardware structure 2191 * 2192 * Determines physical layer capabilities of the current configuration. 2193 **/ 2194 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) 2195 { 2196 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 2197 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2198 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 2199 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 2200 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; 2201 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 2202 u16 ext_ability = 0; 2203 2204 DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); 2205 2206 hw->phy.ops.identify(hw); 2207 2208 switch (hw->phy.type) { 2209 case ixgbe_phy_tn: 2210 case ixgbe_phy_cu_unknown: 2211 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, 2212 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); 2213 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) 2214 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 2215 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) 2216 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; 2217 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) 2218 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 2219 goto out; 2220 default: 2221 break; 2222 } 2223 2224 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 2225 case IXGBE_AUTOC_LMS_1G_AN: 2226 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 2227 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { 2228 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | 2229 IXGBE_PHYSICAL_LAYER_1000BASE_BX; 2230 goto out; 2231 } else 2232 /* SFI mode so read SFP module */ 2233 goto sfp_check; 2234 break; 2235 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 2236 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) 2237 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 2238 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) 2239 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2240 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) 2241 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; 2242 goto out; 2243 break; 2244 case IXGBE_AUTOC_LMS_10G_SERIAL: 2245 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { 2246 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2247 goto out; 2248 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) 2249 goto sfp_check; 2250 break; 2251 case IXGBE_AUTOC_LMS_KX4_KX_KR: 2252 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 2253 if (autoc & IXGBE_AUTOC_KX_SUPP) 2254 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; 2255 if (autoc & IXGBE_AUTOC_KX4_SUPP) 2256 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2257 if (autoc & IXGBE_AUTOC_KR_SUPP) 2258 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2259 goto out; 2260 break; 2261 default: 2262 goto out; 2263 break; 2264 } 2265 2266 sfp_check: 2267 /* SFP check must be done last since DA modules are sometimes used to 2268 * test KR mode - we need to id KR mode correctly before SFP module. 2269 * Call identify_sfp because the pluggable module may have changed */ 2270 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); 2271 out: 2272 return physical_layer; 2273 } 2274 2275 /** 2276 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 2277 * @hw: pointer to hardware structure 2278 * @regval: register value to write to RXCTRL 2279 * 2280 * Enables the Rx DMA unit for 82599 2281 **/ 2282 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) 2283 { 2284 2285 DEBUGFUNC("ixgbe_enable_rx_dma_82599"); 2286 2287 /* 2288 * Workaround for 82599 silicon errata when enabling the Rx datapath. 2289 * If traffic is incoming before we enable the Rx unit, it could hang 2290 * the Rx DMA unit. Therefore, make sure the security engine is 2291 * completely disabled prior to enabling the Rx unit. 2292 */ 2293 2294 hw->mac.ops.disable_sec_rx_path(hw); 2295 2296 if (regval & IXGBE_RXCTRL_RXEN) 2297 ixgbe_enable_rx(hw); 2298 else 2299 ixgbe_disable_rx(hw); 2300 2301 hw->mac.ops.enable_sec_rx_path(hw); 2302 2303 return IXGBE_SUCCESS; 2304 } 2305 2306 /** 2307 * ixgbe_verify_fw_version_82599 - verify FW version for 82599 2308 * @hw: pointer to hardware structure 2309 * 2310 * Verifies that installed the firmware version is 0.6 or higher 2311 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. 2312 * 2313 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or 2314 * if the FW version is not supported. 2315 **/ 2316 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) 2317 { 2318 s32 status = IXGBE_ERR_EEPROM_VERSION; 2319 u16 fw_offset, fw_ptp_cfg_offset; 2320 u16 fw_version; 2321 2322 DEBUGFUNC("ixgbe_verify_fw_version_82599"); 2323 2324 /* firmware check is only necessary for SFI devices */ 2325 if (hw->phy.media_type != ixgbe_media_type_fiber) { 2326 status = IXGBE_SUCCESS; 2327 goto fw_version_out; 2328 } 2329 2330 /* get the offset to the Firmware Module block */ 2331 if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { 2332 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 2333 "eeprom read at offset %d failed", IXGBE_FW_PTR); 2334 return IXGBE_ERR_EEPROM_VERSION; 2335 } 2336 2337 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2338 goto fw_version_out; 2339 2340 /* get the offset to the Pass Through Patch Configuration block */ 2341 if (hw->eeprom.ops.read(hw, (fw_offset + 2342 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), 2343 &fw_ptp_cfg_offset)) { 2344 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 2345 "eeprom read at offset %d failed", 2346 fw_offset + 2347 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); 2348 return IXGBE_ERR_EEPROM_VERSION; 2349 } 2350 2351 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2352 goto fw_version_out; 2353 2354 /* get the firmware version */ 2355 if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + 2356 IXGBE_FW_PATCH_VERSION_4), &fw_version)) { 2357 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 2358 "eeprom read at offset %d failed", 2359 fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); 2360 return IXGBE_ERR_EEPROM_VERSION; 2361 } 2362 2363 if (fw_version > 0x5) 2364 status = IXGBE_SUCCESS; 2365 2366 fw_version_out: 2367 return status; 2368 } 2369 2370 /** 2371 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. 2372 * @hw: pointer to hardware structure 2373 * 2374 * Returns true if the LESM FW module is present and enabled. Otherwise 2375 * returns false. Smart Speed must be disabled if LESM FW module is enabled. 2376 **/ 2377 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2378 { 2379 bool lesm_enabled = false; 2380 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2381 s32 status; 2382 2383 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); 2384 2385 /* get the offset to the Firmware Module block */ 2386 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2387 2388 if ((status != IXGBE_SUCCESS) || 2389 (fw_offset == 0) || (fw_offset == 0xFFFF)) 2390 goto out; 2391 2392 /* get the offset to the LESM Parameters block */ 2393 status = hw->eeprom.ops.read(hw, (fw_offset + 2394 IXGBE_FW_LESM_PARAMETERS_PTR), 2395 &fw_lesm_param_offset); 2396 2397 if ((status != IXGBE_SUCCESS) || 2398 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) 2399 goto out; 2400 2401 /* get the LESM state word */ 2402 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + 2403 IXGBE_FW_LESM_STATE_1), 2404 &fw_lesm_state); 2405 2406 if ((status == IXGBE_SUCCESS) && 2407 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) 2408 lesm_enabled = true; 2409 2410 out: 2411 return lesm_enabled; 2412 } 2413 2414 /** 2415 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using 2416 * fastest available method 2417 * 2418 * @hw: pointer to hardware structure 2419 * @offset: offset of word in EEPROM to read 2420 * @words: number of words 2421 * @data: word(s) read from the EEPROM 2422 * 2423 * Retrieves 16 bit word(s) read from EEPROM 2424 **/ 2425 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, 2426 u16 words, u16 *data) 2427 { 2428 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2429 s32 ret_val = IXGBE_ERR_CONFIG; 2430 2431 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); 2432 2433 /* 2434 * If EEPROM is detected and can be addressed using 14 bits, 2435 * use EERD otherwise use bit bang 2436 */ 2437 if ((eeprom->type == ixgbe_eeprom_spi) && 2438 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) 2439 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, 2440 data); 2441 else 2442 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, 2443 words, 2444 data); 2445 2446 return ret_val; 2447 } 2448 2449 /** 2450 * ixgbe_read_eeprom_82599 - Read EEPROM word using 2451 * fastest available method 2452 * 2453 * @hw: pointer to hardware structure 2454 * @offset: offset of word in the EEPROM to read 2455 * @data: word read from the EEPROM 2456 * 2457 * Reads a 16 bit word from the EEPROM 2458 **/ 2459 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, 2460 u16 offset, u16 *data) 2461 { 2462 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2463 s32 ret_val = IXGBE_ERR_CONFIG; 2464 2465 DEBUGFUNC("ixgbe_read_eeprom_82599"); 2466 2467 /* 2468 * If EEPROM is detected and can be addressed using 14 bits, 2469 * use EERD otherwise use bit bang 2470 */ 2471 if ((eeprom->type == ixgbe_eeprom_spi) && 2472 (offset <= IXGBE_EERD_MAX_ADDR)) 2473 ret_val = ixgbe_read_eerd_generic(hw, offset, data); 2474 else 2475 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); 2476 2477 return ret_val; 2478 } 2479 2480 /** 2481 * ixgbe_reset_pipeline_82599 - perform pipeline reset 2482 * 2483 * @hw: pointer to hardware structure 2484 * 2485 * Reset pipeline by asserting Restart_AN together with LMS change to ensure 2486 * full pipeline reset. This function assumes the SW/FW lock is held. 2487 **/ 2488 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) 2489 { 2490 s32 ret_val; 2491 u32 anlp1_reg = 0; 2492 u32 i, autoc_reg, autoc2_reg; 2493 2494 /* Enable link if disabled in NVM */ 2495 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 2496 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { 2497 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; 2498 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); 2499 IXGBE_WRITE_FLUSH(hw); 2500 } 2501 2502 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2503 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2504 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ 2505 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, 2506 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); 2507 /* Wait for AN to leave state 0 */ 2508 for (i = 0; i < 10; i++) { 2509 msec_delay(4); 2510 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2511 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) 2512 break; 2513 } 2514 2515 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { 2516 DEBUGOUT("auto negotiation not completed\n"); 2517 ret_val = IXGBE_ERR_RESET_FAILED; 2518 goto reset_pipeline_out; 2519 } 2520 2521 ret_val = IXGBE_SUCCESS; 2522 2523 reset_pipeline_out: 2524 /* Write AUTOC register with original LMS field and Restart_AN */ 2525 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2526 IXGBE_WRITE_FLUSH(hw); 2527 2528 return ret_val; 2529 } 2530 2531 /** 2532 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C 2533 * @hw: pointer to hardware structure 2534 * @byte_offset: byte offset to read 2535 * @dev_addr: address to read from 2536 * @data: value read 2537 * 2538 * Performs byte read operation to SFP module's EEPROM over I2C interface at 2539 * a specified device address. 2540 **/ 2541 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 2542 u8 dev_addr, u8 *data) 2543 { 2544 u32 esdp; 2545 s32 status; 2546 s32 timeout = 200; 2547 2548 DEBUGFUNC("ixgbe_read_i2c_byte_82599"); 2549 2550 if (hw->phy.qsfp_shared_i2c_bus == true) { 2551 /* Acquire I2C bus ownership. */ 2552 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2553 esdp |= IXGBE_ESDP_SDP0; 2554 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2555 IXGBE_WRITE_FLUSH(hw); 2556 2557 while (timeout) { 2558 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2559 if (esdp & IXGBE_ESDP_SDP1) 2560 break; 2561 2562 msec_delay(5); 2563 timeout--; 2564 } 2565 2566 if (!timeout) { 2567 DEBUGOUT("Driver can't access resource," 2568 " acquiring I2C bus timeout.\n"); 2569 status = IXGBE_ERR_I2C; 2570 goto release_i2c_access; 2571 } 2572 } 2573 2574 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); 2575 2576 release_i2c_access: 2577 2578 if (hw->phy.qsfp_shared_i2c_bus == true) { 2579 /* Release I2C bus ownership. */ 2580 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2581 esdp &= ~IXGBE_ESDP_SDP0; 2582 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2583 IXGBE_WRITE_FLUSH(hw); 2584 } 2585 2586 return status; 2587 } 2588 2589 /** 2590 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C 2591 * @hw: pointer to hardware structure 2592 * @byte_offset: byte offset to write 2593 * @dev_addr: address to read from 2594 * @data: value to write 2595 * 2596 * Performs byte write operation to SFP module's EEPROM over I2C interface at 2597 * a specified device address. 2598 **/ 2599 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 2600 u8 dev_addr, u8 data) 2601 { 2602 u32 esdp; 2603 s32 status; 2604 s32 timeout = 200; 2605 2606 DEBUGFUNC("ixgbe_write_i2c_byte_82599"); 2607 2608 if (hw->phy.qsfp_shared_i2c_bus == true) { 2609 /* Acquire I2C bus ownership. */ 2610 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2611 esdp |= IXGBE_ESDP_SDP0; 2612 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2613 IXGBE_WRITE_FLUSH(hw); 2614 2615 while (timeout) { 2616 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2617 if (esdp & IXGBE_ESDP_SDP1) 2618 break; 2619 2620 msec_delay(5); 2621 timeout--; 2622 } 2623 2624 if (!timeout) { 2625 DEBUGOUT("Driver can't access resource," 2626 " acquiring I2C bus timeout.\n"); 2627 status = IXGBE_ERR_I2C; 2628 goto release_i2c_access; 2629 } 2630 } 2631 2632 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); 2633 2634 release_i2c_access: 2635 2636 if (hw->phy.qsfp_shared_i2c_bus == true) { 2637 /* Release I2C bus ownership. */ 2638 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2639 esdp &= ~IXGBE_ESDP_SDP0; 2640 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2641 IXGBE_WRITE_FLUSH(hw); 2642 } 2643 2644 return status; 2645 } 2646