/******************************************************************************

  Copyright (c) 2001-2011, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/*
 * 82575EB Gigabit Network Connection
 * 82575EB Gigabit Backplane Connection
 * 82575GB Gigabit Network Connection
 * 82576 Gigabit Network Connection
 * 82576 Quad Port Gigabit Mezzanine Adapter
 */

#include "e1000_api.h"

static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
static void e1000_release_phy_82575(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
static void e1000_release_nvm_82575(struct e1000_hw *hw);
static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
					 u16 *duplex);
static s32  e1000_init_hw_82575(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					   u16 *data);
static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
static s32  e1000_reset_hw_82580(struct e1000_hw *hw);
static s32  e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset,
				     u16 *data);
static s32  e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset,
				      u16 data);
static s32  e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active);
static s32  e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active);
static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
static s32  e1000_get_media_type_82575(struct e1000_hw *hw);
static s32  e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					    u16 data);
static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
						 u16 *speed, u16 *duplex);
static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32  e1000_read_mac_addr_82575(struct e1000_hw *hw);
static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
static s32  e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32  e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32  e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
						  u16 offset);
static s32  e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
						    u16 offset);
static s32  e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
static void e1000_clear_vfta_i350(struct e1000_hw *hw);

static void e1000_i2c_start(struct e1000_hw *hw);
static void e1000_i2c_stop(struct e1000_hw *hw);
static s32  e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
static s32  e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
static s32  e1000_get_i2c_ack(struct e1000_hw *hw);
static s32  e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
static s32  e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
static s32  e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
static bool e1000_get_i2c_data(u32 *i2cctl);

static const u16 e1000_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
#define E1000_82580_RXPBS_TABLE_SIZE \
	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))

/**
 * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
 * @hw: pointer to the HW structure
 *
 * Called to determine if the I2C pins are being used for I2C or as an
 * external MDIO interface since the two options are mutually exclusive.
 **/
static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
{
	u32 reg = 0;
	bool ext_mdio = FALSE;

	DEBUGFUNC("e1000_sgmii_uses_mdio_82575");

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_82576:
		reg = E1000_READ_REG(hw, E1000_MDIC);
		ext_mdio = !!(reg & E1000_MDIC_DEST);
		break;
	case e1000_82580:
	case e1000_i350:
		reg = E1000_READ_REG(hw, E1000_MDICNFG);
		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
		break;
	default:
		break;
	}
	return ext_mdio;
}

/**
 * e1000_init_phy_params_82575 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u32 ctrl_ext;

	DEBUGFUNC("e1000_init_phy_params_82575");

	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_82575;

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	phy->ops.acquire = e1000_acquire_phy_82575;
	phy->ops.check_reset_block = e1000_check_reset_block_generic;
	phy->ops.commit = e1000_phy_sw_reset_generic;
	phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
	phy->ops.release = e1000_release_phy_82575;

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);

	if (e1000_sgmii_active_82575(hw)) {
		phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = e1000_phy_hw_reset_generic;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	e1000_reset_mdicnfg_82580(hw);

	if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
	} else if (hw->mac.type >= e1000_82580) {
		phy->ops.read_reg = e1000_read_phy_reg_82580;
		phy->ops.write_reg = e1000_write_phy_reg_82580;
	} else {
		phy->ops.read_reg = e1000_read_phy_reg_igp;
		phy->ops.write_reg = e1000_write_phy_reg_igp;
	}

	/* Set phy->phy_addr and phy->id. */
	ret_val = e1000_get_phy_id_82575(hw);

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1340M_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		if (phy->id == I347AT4_E_PHY_ID ||
		    phy->id == M88E1112_E_PHY_ID ||
		    phy->id == M88E1340M_E_PHY_ID)
			phy->ops.get_cable_length =
				e1000_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
	case IGP04E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_init_nvm_params_82575 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
	u16 size;

	DEBUGFUNC("e1000_init_nvm_params_82575");

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);
	/*
	 * Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	nvm->type = e1000_nvm_eeprom_spi;

	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_82575;
	nvm->ops.release = e1000_release_nvm_82575;
	if (nvm->word_size < (1 << 15))
		nvm->ops.read = e1000_read_nvm_eerd;
	else
		nvm->ops.read = e1000_read_nvm_spi;

	nvm->ops.write = e1000_write_nvm_spi;
	nvm->ops.validate = e1000_validate_nvm_checksum_generic;
	nvm->ops.update = e1000_update_nvm_checksum_generic;
	nvm->ops.valid_led_default = e1000_valid_led_default_82575;

	/* Override generic family function pointers for specific descendants */
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = e1000_validate_nvm_checksum_82580;
		nvm->ops.update = e1000_update_nvm_checksum_82580;
		break;
	case e1000_i350:
		nvm->ops.validate = e1000_validate_nvm_checksum_i350;
		nvm->ops.update = e1000_update_nvm_checksum_i350;
		break;
	default:
		break;
	}

	return E1000_SUCCESS;
}
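
/*
 * Illustrative sketch (not compiled): the EECD size arithmetic above is
 * terse, so here is a minimal worked example.  It assumes an EECD whose
 * SIZE_EX field reads back as 2 and the usual NVM_WORD_SIZE_BASE_SHIFT of 6;
 * both numbers are only for illustration.
 */
#if 0
	u16 size = 2;				/* example EECD SIZE_EX field */

	size += NVM_WORD_SIZE_BASE_SHIFT;	/* 2 + 6 = 8 */
	if (size > 15)				/* cap at largest supported */
		size = 15;
	/* nvm->word_size = 1 << 8 = 256 sixteen-bit words (512 bytes) */
#endif
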
/**
 * e1000_init_mac_params_82575 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;

	DEBUGFUNC("e1000_init_mac_params_82575");

	/* Derives media type */
	e1000_get_media_type_82575(hw);
	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set uta register count */
	mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
	if (mac->type == e1000_82576)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
	if (mac->type == e1000_82580)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
	if (mac->type == e1000_i350) {
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
		/* Enable EEE default settings for i350 */
		dev_spec->eee_disable = FALSE;
	}

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC supported; valid only if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? TRUE : FALSE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
	/* reset */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = e1000_reset_hw_82580;
	else
		mac->ops.reset_hw = e1000_reset_hw_82575;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_82575;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_generic;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? e1000_setup_copper_link_82575
			: e1000_setup_serdes_link_82575;
	/* physical interface shutdown */
	mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
	/* physical interface power up */
	mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_link_82575;
	/* receive address register setting */
	mac->ops.rar_set = e1000_rar_set_generic;
	/* read mac address */
	mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
	/* configure collision distance */
	mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	if (hw->mac.type == e1000_i350) {
		/* writing VFTA */
		mac->ops.write_vfta = e1000_write_vfta_i350;
		/* clearing VFTA */
		mac->ops.clear_vfta = e1000_clear_vfta_i350;
	} else {
		/* writing VFTA */
		mac->ops.write_vfta = e1000_write_vfta_generic;
		/* clearing VFTA */
		mac->ops.clear_vfta = e1000_clear_vfta_generic;
	}
	/* ID LED init */
	mac->ops.id_led_init = e1000_id_led_init_generic;
	/* blink LED */
	mac->ops.blink_led = e1000_blink_led_generic;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_generic;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_generic;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_generic;
	mac->ops.led_off = e1000_led_off_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_82575;

	/* set lan id for port to determine which phy lock to use */
	hw->mac.ops.set_lan_id(hw);

	return E1000_SUCCESS;
}

/**
 * e1000_init_function_pointers_82575 - Init func ptrs.
 * @hw: pointer to the HW structure
 *
 * Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_init_function_pointers_82575");

	hw->mac.ops.init_params = e1000_init_mac_params_82575;
	hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
	hw->phy.ops.init_params = e1000_init_phy_params_82575;
	hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
}
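
/*
 * Illustrative sketch (not compiled): the 82575 family is driven through the
 * ops tables that e1000_init_function_pointers_82575() fills in.  A minimal
 * caller, assuming a hypothetical "adapter" wrapper that embeds the e1000_hw
 * structure, would look roughly like this:
 */
#if 0
	struct e1000_hw *hw = &adapter->hw;	/* "adapter" is hypothetical */
	s32 ret_val;

	e1000_init_function_pointers_82575(hw);
	ret_val = hw->mac.ops.init_params(hw);
	if (ret_val == E1000_SUCCESS)
		ret_val = hw->nvm.ops.init_params(hw);
	if (ret_val == E1000_SUCCESS)
		ret_val = hw->phy.ops.init_params(hw);
	/* on success, the remaining mac/nvm/phy ops are ready to use */
#endif
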
/**
 * e1000_acquire_phy_82575 - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY.
 **/
static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	DEBUGFUNC("e1000_acquire_phy_82575");

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	return e1000_acquire_swfw_sync_82575(hw, mask);
}

/**
 * e1000_release_phy_82575 - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.
 **/
static void e1000_release_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	DEBUGFUNC("e1000_release_phy_82575");

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	e1000_release_swfw_sync_82575(hw, mask);
}

/**
 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the serial gigabit media independent
 * interface and stores the retrieved information in data.
 **/
static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					  u16 *data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		DEBUGOUT1("PHY Address %u is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = e1000_read_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to the PHY register at the offset using the serial gigabit
 * media independent interface.
 **/
static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					   u16 data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		DEBUGOUT1("PHY Address %u is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = e1000_write_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for both PHYs which do and do not use
 * the sgmii interface.
 **/
static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	DEBUGFUNC("e1000_get_phy_id_82575");

	/*
	 * For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works.  For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work.  The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!e1000_sgmii_active_82575(hw)) {
		phy->addr = 1;
		ret_val = e1000_get_phy_id(hw);
		goto out;
	}

	if (e1000_sgmii_uses_mdio_82575(hw)) {
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = E1000_READ_REG(hw, E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
			mdic = E1000_READ_REG(hw, E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
			break;
		}
		ret_val = e1000_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	E1000_WRITE_FLUSH(hw);
	msec_delay(300);

	/*
	 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == E1000_SUCCESS) {
			DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
				  phy_id, phy->addr);
			/*
			 * At the time of this writing, the M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			DEBUGOUT1("PHY address %u was unreadable\n",
				  phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
	} else {
		ret_val = e1000_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}

/**
 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY using the serial gigabit media independent interface.
 **/
static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");

	/*
	 * This isn't a TRUE "hard" reset, but is the only reset
	 * available to us at this time.
	 */

	DEBUGOUT("Soft resetting SGMII attached PHY...\n");

	if (!(hw->phy.ops.write_reg))
		goto out;

	/*
	 * SFP documentation requires the following to configure the SFP module
	 * to work on SGMII.  No further documentation is given.
	 */
	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
	if (ret_val)
		goto out;

	ret_val = hw->phy.ops.commit(hw);

out:
	return ret_val;
}

/**
 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: TRUE to enable LPLU, FALSE to disable
 *
 * Sets the LPLU D0 state according to the active flag.  When
 * activating LPLU this function also disables smart speed
 * and vice versa.  LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d0_lplu_state_82575");

	if (!(hw->phy.ops.read_reg))
		goto out;

	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		goto out;

	if (active) {
		data |= IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		if (ret_val)
			goto out;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					     data);
		if (ret_val)
			goto out;
	} else {
		data &= ~IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				goto out;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				goto out;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				goto out;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}

/**
 * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: TRUE to enable LPLU, FALSE to disable
 *
 * Sets the LPLU D0 state according to the active flag.  When
 * activating LPLU this function also disables smart speed
 * and vice versa.  LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d0_lplu_state_82580");

	data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);

	if (active) {
		data |= E1000_82580_PM_D0_LPLU;

		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	} else {
		data &= ~E1000_82580_PM_D0_LPLU;

		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	}

	E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
	return ret_val;
}

/**
 * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D3
 * and SmartSpeed is disabled when active is TRUE; otherwise lplu for D3 is
 * cleared and SmartSpeed is enabled.  LPLU and SmartSpeed are mutually
 * exclusive.  LPLU is used during Dx states where the power conservation is
 * most important.  During driver activity, SmartSpeed should be enabled so
 * performance is maintained.
 **/
s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d3_lplu_state_82580");

	data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);

	if (!active) {
		data &= ~E1000_82580_PM_D3_LPLU;
		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
	return ret_val;
}

/**
 * e1000_acquire_nvm_82575 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_82575");

	ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	/*
	 * Check if there is an access error that this
	 * access might trip over.
	 */
	if (hw->mac.type == e1000_i350) {
		u32 eecd = E1000_READ_REG(hw, E1000_EECD);
		if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
			    E1000_EECD_TIMEOUT)) {
			/* Clear all access error flags */
			E1000_WRITE_REG(hw, E1000_EECD, eecd |
					E1000_EECD_ERROR_CLR);
			DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
		}
	}
	if (hw->mac.type == e1000_82580) {
		u32 eecd = E1000_READ_REG(hw, E1000_EECD);
		if (eecd & E1000_EECD_BLOCKED) {
			/* Clear access error flag */
			E1000_WRITE_REG(hw, E1000_EECD, eecd |
					E1000_EECD_BLOCKED);
			DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
		}
	}

	switch (hw->mac.type) {
	default:
		ret_val = e1000_acquire_nvm_generic(hw);
	}

	if (ret_val)
		e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * e1000_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void e1000_release_nvm_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_82575");

	switch (hw->mac.type) {
	default:
		e1000_release_nvm_generic(hw);
	}
	e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}

/**
 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_82575");

	while (i < timeout) {
		if (e1000_get_hw_semaphore_generic(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
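
/*
 * Illustrative sketch (not compiled): callers are expected to bracket every
 * protected PHY/NVM access with the acquire/release pair above.  The mask
 * below is chosen only as an example.
 */
#if 0
	s32 ret_val;

	ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_PHY0_SM);
	if (ret_val)
		return ret_val;

	/* ... touch the PHY/NVM resource protected by E1000_SWFW_PHY0_SM ... */

	e1000_release_swfw_sync_82575(hw, E1000_SWFW_PHY0_SM);
#endif
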
/**
 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 **/
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_82575");

	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
		; /* Empty */

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}

/**
 * e1000_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status.  NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and
 * E1000_SUCCESS is returned.  If we were to return with error, EEPROM-less
 * silicon would not be able to be reset or change link.
 **/
static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_82575");

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;
	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	if (!timeout)
		DEBUGOUT("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		e1000_phy_init_script_igp3(hw);

	return ret_val;
}

/**
 * e1000_get_link_up_info_82575 - Get link speed/duplex info
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * This is a wrapper function.  If using the serial gigabit media independent
 * interface, use PCS to retrieve the link speed and duplex information.
 * Otherwise, use the generic function to get the link speed and duplex info.
 **/
static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
					u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_82575");

	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
							       duplex);
	else
		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
								    duplex);

	return ret_val;
}

/**
 * e1000_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 **/
static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	DEBUGFUNC("e1000_check_for_link_82575");

	if (hw->phy.media_type != e1000_media_type_copper) {
		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
							       &duplex);
		/*
		 * Use this flag to determine if link needs to be checked or
		 * not.  If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;
	} else {
		ret_val = e1000_check_for_copper_link_generic(hw);
	}

	return ret_val;
}

/**
 * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 * @hw: pointer to the HW structure
 **/
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_power_up_serdes_link_82575");

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
}

/**
 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Using the physical coding sub-layer (PCS), retrieve the current speed and
 * duplex, then store the values in the pointers provided.
 **/
static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
						u16 *speed, u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs;

	DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = FALSE;
	*speed = 0;
	*duplex = 0;

	/*
	 * Read the PCS Status register for link state.  For non-copper mode,
	 * the status register is not accurate.  The PCS status register is
	 * used instead.
	 */
	pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);

	/*
	 * The link up bit determines when link is up on autoneg.  The sync ok
	 * gets set once both sides sync up and agree upon link.  Stable link
	 * can be determined by checking for both link up and link sync ok.
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = TRUE;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of serdes, shut down the SFP module and PCS on driver unload
 * when management pass through is not enabled.
 **/
void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_shutdown_serdes_link_82575");

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return;

	if (!e1000_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		E1000_WRITE_FLUSH(hw);
		msec_delay(1);
	}

	return;
}

/**
 * e1000_reset_hw_82575 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.
 **/
static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_82575");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = e1000_set_pcie_completion_timeout(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Set completion timeout has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
		e1000_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	return ret_val;
}

/**
 * e1000_init_hw_82575 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 **/
static s32 e1000_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("e1000_init_hw_82575");

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val) {
		DEBUGOUT("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	DEBUGOUT("Initializing the IEEE VLAN\n");
	mac->ops.clear_vfta(hw);

	/* Setup the receive address */
	e1000_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the default MTU size */
	hw->dev_spec._82575.mtu = 1500;

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82575(hw);

	return ret_val;
}

/**
 * e1000_setup_copper_link_82575 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex.  Then we check
 * for link; once link is established, calls to configure collision distance
 * and flow control are made.
 **/
static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_setup_copper_link_82575");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	ret_val = e1000_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (e1000_sgmii_active_82575(hw)) {
		/* allow time for the SFP cage to power up the phy */
		msec_delay(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			DEBUGOUT("Error resetting the PHY.\n");
			goto out;
		}
	}
	switch (hw->phy.type) {
	case e1000_phy_m88:
		if (hw->phy.id == I347AT4_E_PHY_ID ||
		    hw->phy.id == M88E1112_E_PHY_ID ||
		    hw->phy.id == M88E1340M_E_PHY_ID)
			ret_val = e1000_copper_link_setup_m88_gen2(hw);
		else
			ret_val = e1000_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = e1000_copper_link_setup_82577(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = e1000_setup_copper_link_generic(hw);
out:
	return ret_val;
}

/**
 * e1000_setup_serdes_link_82575 - Setup link for serdes
 * @hw: pointer to the HW structure
 *
 * Configure the physical coding sub-layer (PCS) link.  The PCS link is
 * used on copper connections where the serialized gigabit media independent
 * interface (sgmii) or serdes fiber is being used.  Configures the link
 * for auto-negotiation or forces speed/duplex.
 **/
static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg;
	bool pcs_autoneg;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_setup_serdes_link_82575");

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return ret_val;

	/*
	 * On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed.  A read to
	 * the register does not indicate its status.  Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	/* set both sw defined pins on 82575/82576 */
	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = TRUE;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = FALSE;
		/* fall through to default case */
	default:
		if (hw->mac.type == e1000_82575 ||
		    hw->mac.type == e1000_82576) {
			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
			if (ret_val) {
				DEBUGOUT("NVM Read Error\n");
				return ret_val;
			}

			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
				pcs_autoneg = FALSE;
		}

		/*
		 * non-SGMII modes only support a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
			    E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);

	/*
	 * New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb.  Autoneg should be default set by most drivers.  This is
	 * the mode that will be compatible with older link partners and
	 * switches.  However, both are supported by the hardware and some
	 * drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	/*
	 * We force flow control to prevent the CTRL register values from being
	 * overwritten by the autonegotiated flow control values
	 */
	reg |= E1000_PCS_LCTL_FORCE_FCTRL;

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE |	/* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART;	/* Restart autoneg */
		DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD;		/* Force Speed */
		DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

	if (!e1000_sgmii_active_82575(hw))
		e1000_force_mac_fc_generic(hw);

	return ret_val;
}

/**
 * e1000_get_media_type_82575 - derives current media type.
 * @hw: pointer to the HW structure
 *
 * The media type is chosen based on a few settings.
 * The following are taken into account:
 * - link mode set in the current port Init Control Word #3
 * - current link mode settings in CSR register
 * - MDIO vs. I2C PHY control interface chosen
 * - SFP module media type
 **/
static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
{
	u32 lan_id = 0;
	s32 ret_val = E1000_ERR_CONFIG;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	u32 ctrl_ext = 0;
	u32 current_link_mode = 0;
	u16 init_ctrl_wd_3 = 0;
	u8 init_ctrl_wd_3_offset = 0;
	u8 init_ctrl_wd_3_bit_offset = 0;

	/* Set internal phy as default */
	dev_spec->sgmii_active = FALSE;
	dev_spec->module_plugged = FALSE;

	/*
	 * Check if NVM access method is attached already.
	 * If it is then Init Control Word #3 is considered
	 * otherwise runtime CSR register content is taken.
	 */

	/* Get CSR setting */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);

	/* Get link mode setting */
	if ((hw->nvm.ops.read) && (hw->nvm.ops.read != e1000_null_read_nvm)) {
		/* Take link mode from EEPROM */

		/*
		 * Get LAN port ID to derive its
		 * adequate Init Control Word #3
		 */
		lan_id = ((E1000_READ_REG(hw, E1000_STATUS) &
			   E1000_STATUS_LAN_ID_MASK) >>
			  E1000_STATUS_LAN_ID_OFFSET);
		/*
		 * Derive Init Control Word #3 offset
		 * and mask to pick up link mode setting.
		 */
		if (hw->mac.type < e1000_82580) {
			init_ctrl_wd_3_offset = lan_id ?
				NVM_INIT_CONTROL3_PORT_A :
				NVM_INIT_CONTROL3_PORT_B;
			init_ctrl_wd_3_bit_offset = NVM_WORD24_LNK_MODE_OFFSET;
		} else {
			init_ctrl_wd_3_offset =
				NVM_82580_LAN_FUNC_OFFSET(lan_id) +
				NVM_INIT_CONTROL3_PORT_A;
			init_ctrl_wd_3_bit_offset =
				NVM_WORD24_82580_LNK_MODE_OFFSET;
		}
		/* Read Init Control Word #3 */
		hw->nvm.ops.read(hw, init_ctrl_wd_3_offset, 1, &init_ctrl_wd_3);
		current_link_mode = init_ctrl_wd_3;
		/*
		 * Switch to CSR for all but internal PHY.
		 */
		if ((init_ctrl_wd_3 << (E1000_CTRL_EXT_LINK_MODE_OFFSET -
					init_ctrl_wd_3_bit_offset)) !=
		    E1000_CTRL_EXT_LINK_MODE_GMII) {
			current_link_mode = ctrl_ext;
			init_ctrl_wd_3_bit_offset =
				E1000_CTRL_EXT_LINK_MODE_OFFSET;
		}
	} else {
		/* Take link mode from CSR */
		current_link_mode = ctrl_ext;
		init_ctrl_wd_3_bit_offset = E1000_CTRL_EXT_LINK_MODE_OFFSET;
	}

	/*
	 * Align link mode bits to
	 * their CTRL_EXT location.
	 */
	current_link_mode <<= (E1000_CTRL_EXT_LINK_MODE_OFFSET -
			       init_ctrl_wd_3_bit_offset);
	current_link_mode &= E1000_CTRL_EXT_LINK_MODE_MASK;

	switch (current_link_mode) {
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		current_link_mode = E1000_CTRL_EXT_LINK_MODE_1000BASE_KX;
		break;
	case E1000_CTRL_EXT_LINK_MODE_GMII:
		hw->phy.media_type = e1000_media_type_copper;
		current_link_mode = E1000_CTRL_EXT_LINK_MODE_GMII;
		break;
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		/* Get phy control interface type set (MDIO vs. I2C) */
		if (e1000_sgmii_uses_mdio_82575(hw)) {
			hw->phy.media_type = e1000_media_type_copper;
			dev_spec->sgmii_active = TRUE;
			current_link_mode = E1000_CTRL_EXT_LINK_MODE_SGMII;
		} else {
			ret_val = e1000_set_sfp_media_type_82575(hw);
			if (ret_val != E1000_SUCCESS)
				goto out;
			if (hw->phy.media_type ==
			    e1000_media_type_internal_serdes) {
				current_link_mode =
					E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
			} else if (hw->phy.media_type ==
				   e1000_media_type_copper) {
				current_link_mode =
					E1000_CTRL_EXT_LINK_MODE_SGMII;
			}
		}
		break;
	default:
		DEBUGOUT("Link mode mask doesn't fit bit field size\n");
		goto out;
	}
	/*
	 * Do not change current link mode setting
	 * if media type is fibre or has not been
	 * recognized.
	 */
	if ((hw->phy.media_type != e1000_media_type_unknown) &&
	    (hw->phy.media_type != e1000_media_type_fiber)) {
		/* Update link mode */
		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
				ctrl_ext | current_link_mode);
	}

	ret_val = E1000_SUCCESS;
out:
	/*
	 * If media type was not identified then return media type
	 * defined by the CTRL_EXT settings.
	 */
	if (hw->phy.media_type == e1000_media_type_unknown) {
		if (current_link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII)
			hw->phy.media_type = e1000_media_type_copper;
		else
			hw->phy.media_type = e1000_media_type_internal_serdes;
	}

	return ret_val;
}
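
/*
 * Illustrative sketch (not compiled): the shift/mask in the function above
 * only relocates the link-mode field so it can be compared against the
 * E1000_CTRL_EXT_LINK_MODE_* values.  Spelled out with a hypothetical NVM
 * field offset of 0 for clarity:
 */
#if 0
	u32 link_mode = init_ctrl_wd_3;		/* value read from the NVM */
	u8 field_offset = 0;			/* hypothetical bit offset */

	/* move the field up to the CTRL_EXT link-mode bit positions... */
	link_mode <<= (E1000_CTRL_EXT_LINK_MODE_OFFSET - field_offset);
	/* ...and drop everything outside the link-mode field */
	link_mode &= E1000_CTRL_EXT_LINK_MODE_MASK;
#endif
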
/**
 * e1000_set_sfp_media_type_82575 - derives SFP module media type.
 * @hw: pointer to the HW structure
 *
 * The media type is chosen based on the SFP module compatibility flags
 * retrieved from the SFP ID EEPROM.
 **/
static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_ERR_CONFIG;
	u32 ctrl_ext = 0;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	struct sfp_e1000_flags eth_flags = {0};
	u8 tranceiver_type = 0;

	/* Turn I2C interface ON */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);

	/* Read SFP module data */
	ret_val = e1000_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
			&tranceiver_type);
	if (ret_val != E1000_SUCCESS)
		goto out;
	ret_val = e1000_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
			(u8 *)&eth_flags);
	if (ret_val != E1000_SUCCESS)
		goto out;
	/*
	 * Check if there is some SFP
	 * module plugged and powered
	 */
	if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
	    (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
		dev_spec->module_plugged = TRUE;
		if (eth_flags.e1000_base_lx || eth_flags.e1000_base_sx) {
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags.e1000_base_t) {
			dev_spec->sgmii_active = TRUE;
			hw->phy.media_type = e1000_media_type_copper;
		} else {
			hw->phy.media_type = e1000_media_type_unknown;
			DEBUGOUT("PHY module has not been recognized\n");
			goto out;
		}
	} else {
		hw->phy.media_type = e1000_media_type_unknown;
	}
	ret_val = E1000_SUCCESS;
out:
	/* Restore I2C interface setting */
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	return ret_val;
}

/**
 * e1000_valid_led_default_82575 - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration.  If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_82575");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * e1000_sgmii_active_82575 - Return sgmii state
 * @hw: pointer to the HW structure
 *
 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
 * which can be enabled for use in the embedded applications.  Simply
 * return the current state of the sgmii interface.
 **/
static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 * e1000_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected.  This is only for the 82575.
1810 **/ 1811 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) 1812 { 1813 DEBUGFUNC("e1000_reset_init_script_82575"); 1814 1815 if (hw->mac.type == e1000_82575) { 1816 DEBUGOUT("Running reset init script for 82575\n"); 1817 /* SerDes configuration via SERDESCTRL */ 1818 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); 1819 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); 1820 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); 1821 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); 1822 1823 /* CCM configuration via CCMCTL register */ 1824 e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); 1825 e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); 1826 1827 /* PCIe lanes configuration */ 1828 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); 1829 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); 1830 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); 1831 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); 1832 1833 /* PCIe PLL Configuration */ 1834 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); 1835 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); 1836 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); 1837 } 1838 1839 return E1000_SUCCESS; 1840 } 1841 1842 /** 1843 * e1000_read_mac_addr_82575 - Read device MAC address 1844 * @hw: pointer to the HW structure 1845 **/ 1846 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) 1847 { 1848 s32 ret_val = E1000_SUCCESS; 1849 1850 DEBUGFUNC("e1000_read_mac_addr_82575"); 1851 1852 /* 1853 * If there's an alternate MAC address place it in RAR0 1854 * so that it will override the Si installed default perm 1855 * address. 1856 */ 1857 ret_val = e1000_check_alt_mac_addr_generic(hw); 1858 if (ret_val) 1859 goto out; 1860 1861 ret_val = e1000_read_mac_addr_generic(hw); 1862 1863 out: 1864 return ret_val; 1865 } 1866 1867 /** 1868 * e1000_config_collision_dist_82575 - Configure collision distance 1869 * @hw: pointer to the HW structure 1870 * 1871 * Configures the collision distance to the default value and is used 1872 * during link setup. 1873 **/ 1874 static void e1000_config_collision_dist_82575(struct e1000_hw *hw) 1875 { 1876 u32 tctl_ext; 1877 1878 DEBUGFUNC("e1000_config_collision_dist_82575"); 1879 1880 tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); 1881 1882 tctl_ext &= ~E1000_TCTL_EXT_COLD; 1883 tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; 1884 1885 E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); 1886 E1000_WRITE_FLUSH(hw); 1887 } 1888 1889 /** 1890 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down 1891 * @hw: pointer to the HW structure 1892 * 1893 * In the case of a PHY power down to save power, or to turn off link during a 1894 * driver unload, or wake on lan is not enabled, remove the link. 1895 **/ 1896 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) 1897 { 1898 struct e1000_phy_info *phy = &hw->phy; 1899 1900 if (!(phy->ops.check_reset_block)) 1901 return; 1902 1903 /* If the management interface is not enabled, then power down */ 1904 if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) 1905 e1000_power_down_phy_copper(hw); 1906 1907 return; 1908 } 1909 1910 /** 1911 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters 1912 * @hw: pointer to the HW structure 1913 * 1914 * Clears the hardware counters by reading the counter registers. 
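 *
 * These statistics registers are read-to-clear, so discarding the value
 * is intentional; for example
 *
 *	E1000_READ_REG(hw, E1000_PRC64);
 *
 * is enough to zero the 64-byte received-packet counter.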
 **/
static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_clear_hw_cntrs_82575");

	e1000_clear_hw_cntrs_base_generic(hw);

	E1000_READ_REG(hw, E1000_PRC64);
	E1000_READ_REG(hw, E1000_PRC127);
	E1000_READ_REG(hw, E1000_PRC255);
	E1000_READ_REG(hw, E1000_PRC511);
	E1000_READ_REG(hw, E1000_PRC1023);
	E1000_READ_REG(hw, E1000_PRC1522);
	E1000_READ_REG(hw, E1000_PTC64);
	E1000_READ_REG(hw, E1000_PTC127);
	E1000_READ_REG(hw, E1000_PTC255);
	E1000_READ_REG(hw, E1000_PTC511);
	E1000_READ_REG(hw, E1000_PTC1023);
	E1000_READ_REG(hw, E1000_PTC1522);

	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	E1000_READ_REG(hw, E1000_ICRXPTC);
	E1000_READ_REG(hw, E1000_ICRXATC);
	E1000_READ_REG(hw, E1000_ICTXPTC);
	E1000_READ_REG(hw, E1000_ICTXATC);
	E1000_READ_REG(hw, E1000_ICTXQEC);
	E1000_READ_REG(hw, E1000_ICTXQMTC);
	E1000_READ_REG(hw, E1000_ICRXDMTC);

	E1000_READ_REG(hw, E1000_CBTMPC);
	E1000_READ_REG(hw, E1000_HTDPMC);
	E1000_READ_REG(hw, E1000_CBRMPC);
	E1000_READ_REG(hw, E1000_RPTHC);
	E1000_READ_REG(hw, E1000_HGPTC);
	E1000_READ_REG(hw, E1000_HTCBDPC);
	E1000_READ_REG(hw, E1000_HGORCL);
	E1000_READ_REG(hw, E1000_HGORCH);
	E1000_READ_REG(hw, E1000_HGOTCL);
	E1000_READ_REG(hw, E1000_HGOTCH);
	E1000_READ_REG(hw, E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
	    e1000_sgmii_active_82575(hw))
		E1000_READ_REG(hw, E1000_SCVPC);
}

/**
 * e1000_rx_fifo_flush_82575 - Clean Rx FIFO after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 * function clears the FIFOs and flushes any packets that came in while
 * Rx was being enabled.
 **/
void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("e1000_rx_fifo_workaround_82575");
	if (hw->mac.type != e1000_82575 ||
	    !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
				rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set RCTL.EN and wait 2 ms so that
	 * any packet that arrived while the receiver was being enabled is
	 * flushed.
	 */
	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = E1000_READ_REG(hw, E1000_RLPML);
	E1000_WRITE_REG(hw, E1000_RLPML, 0);

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	E1000_READ_REG(hw, E1000_ROC);
	E1000_READ_REG(hw, E1000_RNBC);
	E1000_READ_REG(hw, E1000_MPC);
}

/**
 * e1000_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms, which is
 * less than the 10ms recommended by the pci-e spec.  To address this we
 * need to increase the value to either 10ms to 200ms for capability
 * version 1 config, or 16ms to 55ms for version 2.
 **/
static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = E1000_READ_REG(hw, E1000_GCR);
	s32 ret_val = E1000_SUCCESS;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					  &pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					   &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	E1000_WRITE_REG(hw, E1000_GCR, gcr);
	return ret_val;
}

/**
 * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 * @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 * enables/disables L2 switch anti-spoofing functionality.
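 *
 * Illustrative use only (pool 0 stands in for whatever pool the caller
 * assigned to the PF):
 *
 *	e1000_vmdq_set_anti_spoofing_pf(hw, TRUE, 0);
 *
 * turns MAC/VLAN anti-spoof checking on for every pool except pool 0.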
2107 **/ 2108 void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) 2109 { 2110 u32 dtxswc; 2111 2112 switch (hw->mac.type) { 2113 case e1000_82576: 2114 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); 2115 if (enable) { 2116 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | 2117 E1000_DTXSWC_VLAN_SPOOF_MASK); 2118 /* The PF can spoof - it has to in order to 2119 * support emulation mode NICs */ 2120 dtxswc ^= (1 << pf | 1 << (pf + 2121 E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 2122 } else { 2123 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | 2124 E1000_DTXSWC_VLAN_SPOOF_MASK); 2125 } 2126 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); 2127 break; 2128 case e1000_i350: 2129 dtxswc = E1000_READ_REG(hw, E1000_TXSWC); 2130 if (enable) { 2131 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | 2132 E1000_DTXSWC_VLAN_SPOOF_MASK); 2133 /* The PF can spoof - it has to in order to 2134 * support emulation mode NICs 2135 */ 2136 dtxswc ^= (1 << pf | 1 << (pf + 2137 E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 2138 } else { 2139 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | 2140 E1000_DTXSWC_VLAN_SPOOF_MASK); 2141 } 2142 E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); 2143 default: 2144 break; 2145 } 2146 } 2147 2148 /** 2149 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback 2150 * @hw: pointer to the hardware struct 2151 * @enable: state to enter, either enabled or disabled 2152 * 2153 * enables/disables L2 switch loopback functionality. 2154 **/ 2155 void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) 2156 { 2157 u32 dtxswc; 2158 2159 switch (hw->mac.type) { 2160 case e1000_82576: 2161 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); 2162 if (enable) 2163 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2164 else 2165 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2166 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); 2167 break; 2168 case e1000_i350: 2169 dtxswc = E1000_READ_REG(hw, E1000_TXSWC); 2170 if (enable) 2171 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2172 else 2173 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2174 E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); 2175 break; 2176 default: 2177 /* Currently no other hardware supports loopback */ 2178 break; 2179 } 2180 2181 2182 } 2183 2184 /** 2185 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication 2186 * @hw: pointer to the hardware struct 2187 * @enable: state to enter, either enabled or disabled 2188 * 2189 * enables/disables replication of packets across multiple pools. 2190 **/ 2191 void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 2192 { 2193 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); 2194 2195 if (enable) 2196 vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 2197 else 2198 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 2199 2200 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); 2201 } 2202 2203 /** 2204 * e1000_read_phy_reg_82580 - Read 82580 MDI control register 2205 * @hw: pointer to the HW structure 2206 * @offset: register offset to be read 2207 * @data: pointer to the read data 2208 * 2209 * Reads the MDI control register in the PHY at offset and stores the 2210 * information read to data. 
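 *
 * Semaphore acquire/release is handled internally, so a caller that has
 * this routine installed as phy.ops.read_reg (as the 82580 PHY init code
 * is expected to do) can read a register in one step; a minimal sketch,
 * with PHY_CONTROL as the standard MII control register offset:
 *
 *	u16 mii_ctrl;
 *	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_ctrl);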
 **/
static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_read_phy_reg_82580");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = e1000_read_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * e1000_write_phy_reg_82580 - Write 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes data to MDI control register in the PHY at offset.
 **/
static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_write_phy_reg_82580");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = e1000_write_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 * @hw: pointer to the HW structure
 *
 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
 * the values found in the EEPROM.  This addresses an issue in which these
 * bits are not restored from EEPROM after reset.
 **/
static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mdicnfg;
	u16 nvm_data = 0;

	DEBUGFUNC("e1000_reset_mdicnfg_82580");

	if (hw->mac.type != e1000_82580)
		goto out;
	if (!e1000_sgmii_active_82575(hw))
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}

/**
 * e1000_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the function or the entire device (all ports, etc.)
 * to a known state.
 **/
static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	DEBUGFUNC("e1000_reset_hw_82580");

	hw->dev_spec._82575.global_device_reset = FALSE;

	/* Get current control state. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset && e1000_acquire_swfw_sync_82575(hw,
	    swmbsw_mask))
		global_device_reset = FALSE;

	if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
	    E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
	E1000_WRITE_FLUSH(hw);

	/* Add delay to ensure DEV_RST has time to complete */
	if (global_device_reset)
		msec_delay(5);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
		e1000_reset_init_script_82575(hw);

	/* clear global device reset status bit */
	E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	ret_val = e1000_reset_mdicnfg_82580(hw);
	if (ret_val)
		DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	/* Release semaphore */
	if (global_device_reset)
		e1000_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}

/**
 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
 * @data: data received by reading RXPBS register
 *
 * The 82580 uses a table-based approach for packet buffer allocation sizes.
 * This function converts the retrieved value into the correct table value:
 *
 *	        0x0  0x1  0x2  0x3  0x4  0x5  0x6  0x7
 *	0x0      36   72  144    1    2    4    8   16
 *	0x8      35   70  140  rsv  rsv  rsv  rsv  rsv
 */
u16 e1000_rxpbs_adjust_82580(u32 data)
{
	u16 ret_val = 0;

	if (data < E1000_82580_RXPBS_TABLE_SIZE)
		ret_val = e1000_82580_rxpbs_table[data];

	return ret_val;
}

/**
 * e1000_validate_nvm_checksum_with_offset - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 * @offset: offset in words of the checksum protected region
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
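 *
 * In other words, the invariant checked here (16-bit wrap-around sum) is:
 *
 *	sum(word[offset] .. word[offset + NVM_CHECKSUM_REG]) == NVM_SUM
 *
 * where the last word in that range is the stored checksum itself and
 * NVM_SUM is 0xBABA.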
 **/
s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");

	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			goto out;
		}
		checksum += nvm_data;
	}

	if (checksum != (u16) NVM_SUM) {
		DEBUGOUT("NVM Checksum Invalid\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_update_nvm_checksum_with_offset - Update EEPROM checksum
 * @hw: pointer to the HW structure
 * @offset: offset in words of the checksum protected region
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.
 **/
s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_with_offset");

	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error while updating checksum.\n");
			goto out;
		}
		checksum += nvm_data;
	}
	checksum = (u16) NVM_SUM - checksum;
	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
				    &checksum);
	if (ret_val)
		DEBUGOUT("NVM Write Error while updating checksum.\n");

out:
	return ret_val;
}

/**
 * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM section checksum by reading/adding each word of
 * the EEPROM and then verifies that the sum of the EEPROM is
 * equal to 0xBABA.
 **/
static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 eeprom_regions_count = 1;
	u16 j, nvm_data;
	u16 nvm_offset;

	DEBUGFUNC("e1000_validate_nvm_checksum_82580");

	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
		/* if the checksum compatibility bit is set, validate
		 * checksums for all 4 ports. */
		eeprom_regions_count = 4;
	}

	for (j = 0; j < eeprom_regions_count; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = e1000_validate_nvm_checksum_with_offset(hw,
								  nvm_offset);
		if (ret_val != E1000_SUCCESS)
			goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM section checksums for all 4 ports by reading/adding
 * each word of the EEPROM up to the checksum.  Then calculates the EEPROM
 * checksum and writes the value to the EEPROM.
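 *
 * For each LAN function j (0..3) the checksum word written is effectively:
 *
 *	base = NVM_82580_LAN_FUNC_OFFSET(j);
 *	checksum = (u16)NVM_SUM -
 *		   sum(word[base] .. word[base + NVM_CHECKSUM_REG - 1]);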
2526 **/ 2527 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) 2528 { 2529 s32 ret_val; 2530 u16 j, nvm_data; 2531 u16 nvm_offset; 2532 2533 DEBUGFUNC("e1000_update_nvm_checksum_82580"); 2534 2535 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2536 if (ret_val) { 2537 DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); 2538 goto out; 2539 } 2540 2541 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { 2542 /* set compatibility bit to validate checksums appropriately */ 2543 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 2544 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2545 &nvm_data); 2546 if (ret_val) { 2547 DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); 2548 goto out; 2549 } 2550 } 2551 2552 for (j = 0; j < 4; j++) { 2553 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2554 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); 2555 if (ret_val) 2556 goto out; 2557 } 2558 2559 out: 2560 return ret_val; 2561 } 2562 2563 /** 2564 * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum 2565 * @hw: pointer to the HW structure 2566 * 2567 * Calculates the EEPROM section checksum by reading/adding each word of 2568 * the EEPROM and then verifies that the sum of the EEPROM is 2569 * equal to 0xBABA. 2570 **/ 2571 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) 2572 { 2573 s32 ret_val = E1000_SUCCESS; 2574 u16 j; 2575 u16 nvm_offset; 2576 2577 DEBUGFUNC("e1000_validate_nvm_checksum_i350"); 2578 2579 for (j = 0; j < 4; j++) { 2580 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2581 ret_val = e1000_validate_nvm_checksum_with_offset(hw, 2582 nvm_offset); 2583 if (ret_val != E1000_SUCCESS) 2584 goto out; 2585 } 2586 2587 out: 2588 return ret_val; 2589 } 2590 2591 /** 2592 * e1000_update_nvm_checksum_i350 - Update EEPROM checksum 2593 * @hw: pointer to the HW structure 2594 * 2595 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2596 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2597 * checksum and writes the value to the EEPROM. 2598 **/ 2599 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) 2600 { 2601 s32 ret_val = E1000_SUCCESS; 2602 u16 j; 2603 u16 nvm_offset; 2604 2605 DEBUGFUNC("e1000_update_nvm_checksum_i350"); 2606 2607 for (j = 0; j < 4; j++) { 2608 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2609 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); 2610 if (ret_val != E1000_SUCCESS) 2611 goto out; 2612 } 2613 2614 out: 2615 return ret_val; 2616 } 2617 2618 /** 2619 * e1000_set_eee_i350 - Enable/disable EEE support 2620 * @hw: pointer to the HW structure 2621 * 2622 * Enable/disable EEE based on setting in dev_spec structure. 
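 *
 * Illustrative driver-side usage; eee_disable is the dev_spec flag
 * consulted below, and the call is a no-op on pre-i350 or non-copper
 * ports:
 *
 *	hw->dev_spec._82575.eee_disable = FALSE;
 *	ret_val = e1000_set_eee_i350(hw);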
2623 * 2624 **/ 2625 s32 e1000_set_eee_i350(struct e1000_hw *hw) 2626 { 2627 s32 ret_val = E1000_SUCCESS; 2628 u32 ipcnfg, eeer; 2629 2630 DEBUGFUNC("e1000_set_eee_i350"); 2631 2632 if ((hw->mac.type < e1000_i350) || 2633 (hw->phy.media_type != e1000_media_type_copper)) 2634 goto out; 2635 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); 2636 eeer = E1000_READ_REG(hw, E1000_EEER); 2637 2638 /* enable or disable per user setting */ 2639 if (!(hw->dev_spec._82575.eee_disable)) { 2640 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2641 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2642 E1000_EEER_LPI_FC); 2643 2644 } else { 2645 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2646 eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2647 E1000_EEER_LPI_FC); 2648 } 2649 E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); 2650 E1000_WRITE_REG(hw, E1000_EEER, eeer); 2651 E1000_READ_REG(hw, E1000_IPCNFG); 2652 E1000_READ_REG(hw, E1000_EEER); 2653 out: 2654 2655 return ret_val; 2656 } 2657 2658 /* Due to a hw errata, if the host tries to configure the VFTA register 2659 * while performing queries from the BMC or DMA, then the VFTA in some 2660 * cases won't be written. 2661 */ 2662 2663 /** 2664 * e1000_clear_vfta_i350 - Clear VLAN filter table 2665 * @hw: pointer to the HW structure 2666 * 2667 * Clears the register array which contains the VLAN filter table by 2668 * setting all the values to 0. 2669 **/ 2670 void e1000_clear_vfta_i350(struct e1000_hw *hw) 2671 { 2672 u32 offset; 2673 int i; 2674 2675 DEBUGFUNC("e1000_clear_vfta_350"); 2676 2677 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 2678 for (i = 0; i < 10; i++) 2679 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); 2680 2681 E1000_WRITE_FLUSH(hw); 2682 } 2683 } 2684 2685 /** 2686 * e1000_write_vfta_i350 - Write value to VLAN filter table 2687 * @hw: pointer to the HW structure 2688 * @offset: register offset in VLAN filter table 2689 * @value: register value written to VLAN filter table 2690 * 2691 * Writes value at the given offset in the register array which stores 2692 * the VLAN filter table. 2693 **/ 2694 void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) 2695 { 2696 int i; 2697 2698 DEBUGFUNC("e1000_write_vfta_350"); 2699 2700 for (i = 0; i < 10; i++) 2701 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); 2702 2703 E1000_WRITE_FLUSH(hw); 2704 } 2705 2706 2707 /** 2708 * e1000_set_i2c_bb - Enable I2C bit-bang 2709 * @hw: pointer to the HW structure 2710 * 2711 * Enable I2C bit-bang interface 2712 * 2713 **/ 2714 s32 e1000_set_i2c_bb(struct e1000_hw *hw) 2715 { 2716 s32 ret_val = E1000_SUCCESS; 2717 u32 ctrl_ext, i2cparams; 2718 2719 DEBUGFUNC("e1000_set_i2c_bb"); 2720 2721 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2722 ctrl_ext |= E1000_CTRL_I2C_ENA; 2723 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 2724 E1000_WRITE_FLUSH(hw); 2725 2726 i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); 2727 i2cparams |= E1000_I2CBB_EN; 2728 i2cparams |= E1000_I2C_DATA_OE_N; 2729 i2cparams |= E1000_I2C_CLK_OE_N; 2730 E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); 2731 E1000_WRITE_FLUSH(hw); 2732 2733 return ret_val; 2734 } 2735 2736 /** 2737 * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C 2738 * @hw: pointer to hardware structure 2739 * @byte_offset: byte offset to read 2740 * @data: value read 2741 * 2742 * Performs byte read operation over I2C interface at 2743 * a specified device address. 
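 *
 * Minimal usage sketch; 0xA0 is shown only as an example value for the
 * conventional SFP ID EEPROM device address:
 *
 *	u8 id;
 *	status = e1000_read_i2c_byte_generic(hw, E1000_SFF_IDENTIFIER_OFFSET,
 *					     0xA0, &id);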
2744 **/ 2745 s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, 2746 u8 dev_addr, u8 *data) 2747 { 2748 s32 status = E1000_SUCCESS; 2749 u32 max_retry = 10; 2750 u32 retry = 1; 2751 u16 swfw_mask = 0; 2752 2753 bool nack = 1; 2754 2755 DEBUGFUNC("e1000_read_i2c_byte_generic"); 2756 2757 swfw_mask = E1000_SWFW_PHY0_SM; 2758 2759 do { 2760 if (e1000_acquire_swfw_sync_82575(hw, swfw_mask) 2761 != E1000_SUCCESS) { 2762 status = E1000_ERR_SWFW_SYNC; 2763 goto read_byte_out; 2764 } 2765 2766 e1000_i2c_start(hw); 2767 2768 /* Device Address and write indication */ 2769 status = e1000_clock_out_i2c_byte(hw, dev_addr); 2770 if (status != E1000_SUCCESS) 2771 goto fail; 2772 2773 status = e1000_get_i2c_ack(hw); 2774 if (status != E1000_SUCCESS) 2775 goto fail; 2776 2777 status = e1000_clock_out_i2c_byte(hw, byte_offset); 2778 if (status != E1000_SUCCESS) 2779 goto fail; 2780 2781 status = e1000_get_i2c_ack(hw); 2782 if (status != E1000_SUCCESS) 2783 goto fail; 2784 2785 e1000_i2c_start(hw); 2786 2787 /* Device Address and read indication */ 2788 status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); 2789 if (status != E1000_SUCCESS) 2790 goto fail; 2791 2792 status = e1000_get_i2c_ack(hw); 2793 if (status != E1000_SUCCESS) 2794 goto fail; 2795 2796 status = e1000_clock_in_i2c_byte(hw, data); 2797 if (status != E1000_SUCCESS) 2798 goto fail; 2799 2800 status = e1000_clock_out_i2c_bit(hw, nack); 2801 if (status != E1000_SUCCESS) 2802 goto fail; 2803 2804 e1000_i2c_stop(hw); 2805 break; 2806 2807 fail: 2808 e1000_release_swfw_sync_82575(hw, swfw_mask); 2809 msec_delay(100); 2810 e1000_i2c_bus_clear(hw); 2811 retry++; 2812 if (retry < max_retry) 2813 DEBUGOUT("I2C byte read error - Retrying.\n"); 2814 else 2815 DEBUGOUT("I2C byte read error.\n"); 2816 2817 } while (retry < max_retry); 2818 2819 e1000_release_swfw_sync_82575(hw, swfw_mask); 2820 2821 read_byte_out: 2822 2823 return status; 2824 } 2825 2826 /** 2827 * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C 2828 * @hw: pointer to hardware structure 2829 * @byte_offset: byte offset to write 2830 * @data: value to write 2831 * 2832 * Performs byte write operation over I2C interface at 2833 * a specified device address. 
2834 **/ 2835 s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, 2836 u8 dev_addr, u8 data) 2837 { 2838 s32 status = E1000_SUCCESS; 2839 u32 max_retry = 1; 2840 u32 retry = 0; 2841 u16 swfw_mask = 0; 2842 2843 DEBUGFUNC("e1000_write_i2c_byte_generic"); 2844 2845 swfw_mask = E1000_SWFW_PHY0_SM; 2846 2847 if (e1000_acquire_swfw_sync_82575(hw, swfw_mask) != E1000_SUCCESS) { 2848 status = E1000_ERR_SWFW_SYNC; 2849 goto write_byte_out; 2850 } 2851 2852 do { 2853 e1000_i2c_start(hw); 2854 2855 status = e1000_clock_out_i2c_byte(hw, dev_addr); 2856 if (status != E1000_SUCCESS) 2857 goto fail; 2858 2859 status = e1000_get_i2c_ack(hw); 2860 if (status != E1000_SUCCESS) 2861 goto fail; 2862 2863 status = e1000_clock_out_i2c_byte(hw, byte_offset); 2864 if (status != E1000_SUCCESS) 2865 goto fail; 2866 2867 status = e1000_get_i2c_ack(hw); 2868 if (status != E1000_SUCCESS) 2869 goto fail; 2870 2871 status = e1000_clock_out_i2c_byte(hw, data); 2872 if (status != E1000_SUCCESS) 2873 goto fail; 2874 2875 status = e1000_get_i2c_ack(hw); 2876 if (status != E1000_SUCCESS) 2877 goto fail; 2878 2879 e1000_i2c_stop(hw); 2880 break; 2881 2882 fail: 2883 e1000_i2c_bus_clear(hw); 2884 retry++; 2885 if (retry < max_retry) 2886 DEBUGOUT("I2C byte write error - Retrying.\n"); 2887 else 2888 DEBUGOUT("I2C byte write error.\n"); 2889 } while (retry < max_retry); 2890 2891 e1000_release_swfw_sync_82575(hw, swfw_mask); 2892 2893 write_byte_out: 2894 2895 return status; 2896 } 2897 2898 /** 2899 * e1000_i2c_start - Sets I2C start condition 2900 * @hw: pointer to hardware structure 2901 * 2902 * Sets I2C start condition (High -> Low on SDA while SCL is High) 2903 **/ 2904 static void e1000_i2c_start(struct e1000_hw *hw) 2905 { 2906 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 2907 2908 DEBUGFUNC("e1000_i2c_start"); 2909 2910 /* Start condition must begin with data and clock high */ 2911 e1000_set_i2c_data(hw, &i2cctl, 1); 2912 e1000_raise_i2c_clk(hw, &i2cctl); 2913 2914 /* Setup time for start condition (4.7us) */ 2915 usec_delay(E1000_I2C_T_SU_STA); 2916 2917 e1000_set_i2c_data(hw, &i2cctl, 0); 2918 2919 /* Hold time for start condition (4us) */ 2920 usec_delay(E1000_I2C_T_HD_STA); 2921 2922 e1000_lower_i2c_clk(hw, &i2cctl); 2923 2924 /* Minimum low period of clock is 4.7 us */ 2925 usec_delay(E1000_I2C_T_LOW); 2926 2927 } 2928 2929 /** 2930 * e1000_i2c_stop - Sets I2C stop condition 2931 * @hw: pointer to hardware structure 2932 * 2933 * Sets I2C stop condition (Low -> High on SDA while SCL is High) 2934 **/ 2935 static void e1000_i2c_stop(struct e1000_hw *hw) 2936 { 2937 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 2938 2939 DEBUGFUNC("e1000_i2c_stop"); 2940 2941 /* Stop condition must begin with data low and clock high */ 2942 e1000_set_i2c_data(hw, &i2cctl, 0); 2943 e1000_raise_i2c_clk(hw, &i2cctl); 2944 2945 /* Setup time for stop condition (4us) */ 2946 usec_delay(E1000_I2C_T_SU_STO); 2947 2948 e1000_set_i2c_data(hw, &i2cctl, 1); 2949 2950 /* bus free time between stop and start (4.7us)*/ 2951 usec_delay(E1000_I2C_T_BUF); 2952 } 2953 2954 /** 2955 * e1000_clock_in_i2c_byte - Clocks in one byte via I2C 2956 * @hw: pointer to hardware structure 2957 * @data: data byte to clock in 2958 * 2959 * Clocks in one byte data via I2C data/clock 2960 **/ 2961 static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) 2962 { 2963 s32 i; 2964 bool bit = 0; 2965 2966 DEBUGFUNC("e1000_clock_in_i2c_byte"); 2967 2968 *data = 0; 2969 for (i = 7; i >= 0; i--) { 2970 e1000_clock_in_i2c_bit(hw, &bit); 
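		/* MSB first: the bit read on this clock lands in position i */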
2971 *data |= bit << i; 2972 } 2973 2974 return E1000_SUCCESS; 2975 } 2976 2977 /** 2978 * e1000_clock_out_i2c_byte - Clocks out one byte via I2C 2979 * @hw: pointer to hardware structure 2980 * @data: data byte clocked out 2981 * 2982 * Clocks out one byte data via I2C data/clock 2983 **/ 2984 static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) 2985 { 2986 s32 status = E1000_SUCCESS; 2987 s32 i; 2988 u32 i2cctl; 2989 bool bit = 0; 2990 2991 DEBUGFUNC("e1000_clock_out_i2c_byte"); 2992 2993 for (i = 7; i >= 0; i--) { 2994 bit = (data >> i) & 0x1; 2995 status = e1000_clock_out_i2c_bit(hw, bit); 2996 2997 if (status != E1000_SUCCESS) 2998 break; 2999 } 3000 3001 /* Release SDA line (set high) */ 3002 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3003 3004 i2cctl |= E1000_I2C_DATA_OE_N; 3005 E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); 3006 E1000_WRITE_FLUSH(hw); 3007 3008 return status; 3009 } 3010 3011 /** 3012 * e1000_get_i2c_ack - Polls for I2C ACK 3013 * @hw: pointer to hardware structure 3014 * 3015 * Clocks in/out one bit via I2C data/clock 3016 **/ 3017 static s32 e1000_get_i2c_ack(struct e1000_hw *hw) 3018 { 3019 s32 status = E1000_SUCCESS; 3020 u32 i = 0; 3021 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3022 u32 timeout = 10; 3023 bool ack = 1; 3024 3025 DEBUGFUNC("e1000_get_i2c_ack"); 3026 3027 e1000_raise_i2c_clk(hw, &i2cctl); 3028 3029 /* Minimum high period of clock is 4us */ 3030 usec_delay(E1000_I2C_T_HIGH); 3031 3032 /* Wait until SCL returns high */ 3033 for (i = 0; i < timeout; i++) { 3034 usec_delay(1); 3035 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3036 if (i2cctl & E1000_I2C_CLK_IN) 3037 break; 3038 } 3039 if (!(i2cctl & E1000_I2C_CLK_IN)) 3040 return E1000_ERR_I2C; 3041 3042 ack = e1000_get_i2c_data(&i2cctl); 3043 if (ack == 1) { 3044 DEBUGOUT("I2C ack was not received.\n"); 3045 status = E1000_ERR_I2C; 3046 } 3047 3048 e1000_lower_i2c_clk(hw, &i2cctl); 3049 3050 /* Minimum low period of clock is 4.7 us */ 3051 usec_delay(E1000_I2C_T_LOW); 3052 3053 return status; 3054 } 3055 3056 /** 3057 * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock 3058 * @hw: pointer to hardware structure 3059 * @data: read data value 3060 * 3061 * Clocks in one bit via I2C data/clock 3062 **/ 3063 static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) 3064 { 3065 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3066 3067 DEBUGFUNC("e1000_clock_in_i2c_bit"); 3068 3069 e1000_raise_i2c_clk(hw, &i2cctl); 3070 3071 /* Minimum high period of clock is 4us */ 3072 usec_delay(E1000_I2C_T_HIGH); 3073 3074 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3075 *data = e1000_get_i2c_data(&i2cctl); 3076 3077 e1000_lower_i2c_clk(hw, &i2cctl); 3078 3079 /* Minimum low period of clock is 4.7 us */ 3080 usec_delay(E1000_I2C_T_LOW); 3081 3082 return E1000_SUCCESS; 3083 } 3084 3085 /** 3086 * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock 3087 * @hw: pointer to hardware structure 3088 * @data: data value to write 3089 * 3090 * Clocks out one bit via I2C data/clock 3091 **/ 3092 static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) 3093 { 3094 s32 status; 3095 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3096 3097 DEBUGFUNC("e1000_clock_out_i2c_bit"); 3098 3099 status = e1000_set_i2c_data(hw, &i2cctl, data); 3100 if (status == E1000_SUCCESS) { 3101 e1000_raise_i2c_clk(hw, &i2cctl); 3102 3103 /* Minimum high period of clock is 4us */ 3104 usec_delay(E1000_I2C_T_HIGH); 3105 3106 e1000_lower_i2c_clk(hw, &i2cctl); 3107 3108 /* Minimum 
low period of clock is 4.7 us. 3109 * This also takes care of the data hold time. 3110 */ 3111 usec_delay(E1000_I2C_T_LOW); 3112 } else { 3113 status = E1000_ERR_I2C; 3114 DEBUGOUT1("I2C data was not set to %X\n", data); 3115 } 3116 3117 return status; 3118 } 3119 /** 3120 * e1000_raise_i2c_clk - Raises the I2C SCL clock 3121 * @hw: pointer to hardware structure 3122 * @i2cctl: Current value of I2CCTL register 3123 * 3124 * Raises the I2C clock line '0'->'1' 3125 **/ 3126 static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) 3127 { 3128 DEBUGFUNC("e1000_raise_i2c_clk"); 3129 3130 *i2cctl |= E1000_I2C_CLK_OUT; 3131 *i2cctl &= ~E1000_I2C_CLK_OE_N; 3132 E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); 3133 E1000_WRITE_FLUSH(hw); 3134 3135 /* SCL rise time (1000ns) */ 3136 usec_delay(E1000_I2C_T_RISE); 3137 } 3138 3139 /** 3140 * e1000_lower_i2c_clk - Lowers the I2C SCL clock 3141 * @hw: pointer to hardware structure 3142 * @i2cctl: Current value of I2CCTL register 3143 * 3144 * Lowers the I2C clock line '1'->'0' 3145 **/ 3146 static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) 3147 { 3148 3149 DEBUGFUNC("e1000_lower_i2c_clk"); 3150 3151 *i2cctl &= ~E1000_I2C_CLK_OUT; 3152 *i2cctl &= ~E1000_I2C_CLK_OE_N; 3153 E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); 3154 E1000_WRITE_FLUSH(hw); 3155 3156 /* SCL fall time (300ns) */ 3157 usec_delay(E1000_I2C_T_FALL); 3158 } 3159 3160 /** 3161 * e1000_set_i2c_data - Sets the I2C data bit 3162 * @hw: pointer to hardware structure 3163 * @i2cctl: Current value of I2CCTL register 3164 * @data: I2C data value (0 or 1) to set 3165 * 3166 * Sets the I2C data bit 3167 **/ 3168 static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) 3169 { 3170 s32 status = E1000_SUCCESS; 3171 3172 DEBUGFUNC("e1000_set_i2c_data"); 3173 3174 if (data) 3175 *i2cctl |= E1000_I2C_DATA_OUT; 3176 else 3177 *i2cctl &= ~E1000_I2C_DATA_OUT; 3178 3179 *i2cctl &= ~E1000_I2C_DATA_OE_N; 3180 *i2cctl |= E1000_I2C_CLK_OE_N; 3181 E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); 3182 E1000_WRITE_FLUSH(hw); 3183 3184 /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ 3185 usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); 3186 3187 *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3188 if (data != e1000_get_i2c_data(i2cctl)) { 3189 status = E1000_ERR_I2C; 3190 DEBUGOUT1("Error - I2C data was not set to %X.\n", data); 3191 } 3192 3193 return status; 3194 } 3195 3196 /** 3197 * e1000_get_i2c_data - Reads the I2C SDA data bit 3198 * @hw: pointer to hardware structure 3199 * @i2cctl: Current value of I2CCTL register 3200 * 3201 * Returns the I2C data bit value 3202 **/ 3203 static bool e1000_get_i2c_data(u32 *i2cctl) 3204 { 3205 bool data; 3206 3207 DEBUGFUNC("e1000_get_i2c_data"); 3208 3209 if (*i2cctl & E1000_I2C_DATA_IN) 3210 data = 1; 3211 else 3212 data = 0; 3213 3214 return data; 3215 } 3216 3217 /** 3218 * e1000_i2c_bus_clear - Clears the I2C bus 3219 * @hw: pointer to hardware structure 3220 * 3221 * Clears the I2C bus by sending nine clock pulses. 3222 * Used when data line is stuck low. 
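 *
 * Both e1000_read_i2c_byte_generic() and e1000_write_i2c_byte_generic()
 * call this from their failure paths before retrying, giving a device
 * that is holding SDA low a chance to release the bus.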
3223 **/ 3224 void e1000_i2c_bus_clear(struct e1000_hw *hw) 3225 { 3226 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); 3227 u32 i; 3228 3229 DEBUGFUNC("e1000_i2c_bus_clear"); 3230 3231 e1000_i2c_start(hw); 3232 3233 e1000_set_i2c_data(hw, &i2cctl, 1); 3234 3235 for (i = 0; i < 9; i++) { 3236 e1000_raise_i2c_clk(hw, &i2cctl); 3237 3238 /* Min high period of clock is 4us */ 3239 usec_delay(E1000_I2C_T_HIGH); 3240 3241 e1000_lower_i2c_clk(hw, &i2cctl); 3242 3243 /* Min low period of clock is 4.7us*/ 3244 usec_delay(E1000_I2C_T_LOW); 3245 } 3246 3247 e1000_i2c_start(hw); 3248 3249 /* Put the i2c bus back to default state */ 3250 e1000_i2c_stop(hw); 3251 } 3252 3253