/******************************************************************************

  Copyright (c) 2001-2009, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/*
 * 82575EB Gigabit Network Connection
 * 82575EB Gigabit Backplane Connection
 * 82575GB Gigabit Network Connection
 * 82576 Gigabit Network Connection
 * 82576 Quad Port Gigabit Mezzanine Adapter
 */

#include "e1000_api.h"

static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
static s32  e1000_init_nvm_params_82575(struct e1000_hw *hw);
static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
static void e1000_release_phy_82575(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
static void e1000_release_nvm_82575(struct e1000_hw *hw);
static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
                                         u16 *duplex);
static s32  e1000_init_hw_82575(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
                                           u16 *data);
static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
static s32  e1000_reset_hw_82580(struct e1000_hw *hw);
static s32  e1000_read_phy_reg_82580(struct e1000_hw *hw,
                                     u32 offset, u16 *data);
static s32  e1000_write_phy_reg_82580(struct e1000_hw *hw,
                                      u32 offset, u16 data);
static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
                                          bool active);
static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw
*hw, 73 u32 offset, u16 data); 74 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); 75 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); 76 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, 77 u16 *speed, u16 *duplex); 78 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); 79 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); 80 static bool e1000_sgmii_active_82575(struct e1000_hw *hw); 81 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); 82 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); 83 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); 84 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); 85 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); 86 87 static const u16 e1000_82580_rxpbs_table[] = 88 { 36, 72, 144, 1, 2, 4, 8, 16, 89 35, 70, 140 }; 90 #define E1000_82580_RXPBS_TABLE_SIZE \ 91 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) 92 93 /** 94 * e1000_init_phy_params_82575 - Init PHY func ptrs. 95 * @hw: pointer to the HW structure 96 **/ 97 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) 98 { 99 struct e1000_phy_info *phy = &hw->phy; 100 s32 ret_val = E1000_SUCCESS; 101 102 DEBUGFUNC("e1000_init_phy_params_82575"); 103 104 if (hw->phy.media_type != e1000_media_type_copper) { 105 phy->type = e1000_phy_none; 106 goto out; 107 } 108 109 phy->ops.power_up = e1000_power_up_phy_copper; 110 phy->ops.power_down = e1000_power_down_phy_copper_82575; 111 112 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 113 phy->reset_delay_us = 100; 114 115 phy->ops.acquire = e1000_acquire_phy_82575; 116 phy->ops.check_reset_block = e1000_check_reset_block_generic; 117 phy->ops.commit = e1000_phy_sw_reset_generic; 118 phy->ops.get_cfg_done = e1000_get_cfg_done_82575; 119 phy->ops.release = e1000_release_phy_82575; 120 121 if (e1000_sgmii_active_82575(hw)) { 122 phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; 123 phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; 124 phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; 125 } else if ((hw->mac.type == e1000_82580) || 126 (hw->mac.type == e1000_82580er)) { 127 phy->ops.reset = e1000_phy_hw_reset_generic; 128 phy->ops.read_reg = e1000_read_phy_reg_82580; 129 phy->ops.write_reg = e1000_write_phy_reg_82580; 130 } else { 131 phy->ops.reset = e1000_phy_hw_reset_generic; 132 phy->ops.read_reg = e1000_read_phy_reg_igp; 133 phy->ops.write_reg = e1000_write_phy_reg_igp; 134 } 135 136 /* Set phy->phy_addr and phy->id. 
*/ 137 ret_val = e1000_get_phy_id_82575(hw); 138 139 /* Verify phy id and set remaining function pointers */ 140 switch (phy->id) { 141 case M88E1111_I_PHY_ID: 142 phy->type = e1000_phy_m88; 143 phy->ops.check_polarity = e1000_check_polarity_m88; 144 phy->ops.get_info = e1000_get_phy_info_m88; 145 phy->ops.get_cable_length = e1000_get_cable_length_m88; 146 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; 147 break; 148 case IGP03E1000_E_PHY_ID: 149 case IGP04E1000_E_PHY_ID: 150 phy->type = e1000_phy_igp_3; 151 phy->ops.check_polarity = e1000_check_polarity_igp; 152 phy->ops.get_info = e1000_get_phy_info_igp; 153 phy->ops.get_cable_length = e1000_get_cable_length_igp_2; 154 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; 155 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; 156 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; 157 break; 158 case I82580_I_PHY_ID: 159 phy->type = e1000_phy_82580; 160 phy->ops.check_polarity = e1000_check_polarity_82577; 161 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; 162 phy->ops.get_cable_length = e1000_get_cable_length_82577; 163 phy->ops.get_info = e1000_get_phy_info_82577; 164 break; 165 default: 166 ret_val = -E1000_ERR_PHY; 167 goto out; 168 } 169 170 out: 171 return ret_val; 172 } 173 174 /** 175 * e1000_init_nvm_params_82575 - Init NVM func ptrs. 176 * @hw: pointer to the HW structure 177 **/ 178 static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) 179 { 180 struct e1000_nvm_info *nvm = &hw->nvm; 181 u32 eecd = E1000_READ_REG(hw, E1000_EECD); 182 u16 size; 183 184 DEBUGFUNC("e1000_init_nvm_params_82575"); 185 186 nvm->opcode_bits = 8; 187 nvm->delay_usec = 1; 188 switch (nvm->override) { 189 case e1000_nvm_override_spi_large: 190 nvm->page_size = 32; 191 nvm->address_bits = 16; 192 break; 193 case e1000_nvm_override_spi_small: 194 nvm->page_size = 8; 195 nvm->address_bits = 8; 196 break; 197 default: 198 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; 199 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; 200 break; 201 } 202 203 nvm->type = e1000_nvm_eeprom_spi; 204 205 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 206 E1000_EECD_SIZE_EX_SHIFT); 207 208 /* 209 * Added to a constant, "size" becomes the left-shift value 210 * for setting word_size. 211 */ 212 size += NVM_WORD_SIZE_BASE_SHIFT; 213 214 /* EEPROM access above 16k is unsupported */ 215 if (size > 14) 216 size = 14; 217 nvm->word_size = 1 << size; 218 219 /* Function Pointers */ 220 nvm->ops.acquire = e1000_acquire_nvm_82575; 221 nvm->ops.read = e1000_read_nvm_eerd; 222 nvm->ops.release = e1000_release_nvm_82575; 223 nvm->ops.update = e1000_update_nvm_checksum_generic; 224 nvm->ops.valid_led_default = e1000_valid_led_default_82575; 225 nvm->ops.validate = e1000_validate_nvm_checksum_generic; 226 nvm->ops.write = e1000_write_nvm_spi; 227 228 return E1000_SUCCESS; 229 } 230 231 /** 232 * e1000_init_mac_params_82575 - Init MAC func ptrs. 233 * @hw: pointer to the HW structure 234 **/ 235 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) 236 { 237 struct e1000_mac_info *mac = &hw->mac; 238 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 239 u32 ctrl_ext = 0; 240 241 DEBUGFUNC("e1000_init_mac_params_82575"); 242 243 /* Set media type */ 244 /* 245 * The 82575 uses bits 22:23 for link mode. The mode can be changed 246 * based on the EEPROM. We cannot rely upon device ID. There 247 * is no distinguishable difference between fiber and internal 248 * SerDes mode on the 82575. 
There can be an external PHY attached 249 * on the SGMII interface. For this, we'll set sgmii_active to TRUE. 250 */ 251 hw->phy.media_type = e1000_media_type_copper; 252 dev_spec->sgmii_active = FALSE; 253 254 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 255 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 256 case E1000_CTRL_EXT_LINK_MODE_SGMII: 257 dev_spec->sgmii_active = TRUE; 258 ctrl_ext |= E1000_CTRL_I2C_ENA; 259 break; 260 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 261 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: 262 hw->phy.media_type = e1000_media_type_internal_serdes; 263 ctrl_ext |= E1000_CTRL_I2C_ENA; 264 break; 265 default: 266 ctrl_ext &= ~E1000_CTRL_I2C_ENA; 267 break; 268 } 269 270 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 271 272 /* 273 * if using i2c make certain the MDICNFG register is cleared to prevent 274 * communications from being misrouted to the mdic registers 275 */ 276 if ((ctrl_ext & E1000_CTRL_I2C_ENA) && 277 ((hw->mac.type == e1000_82580) || (hw->mac.type == e1000_82580er))) 278 E1000_WRITE_REG(hw, E1000_MDICNFG, 0); 279 280 /* Set mta register count */ 281 mac->mta_reg_count = 128; 282 /* Set uta register count */ 283 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; 284 /* Set rar entry count */ 285 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 286 if (mac->type == e1000_82576) 287 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 288 if ((mac->type == e1000_82580) || (mac->type == e1000_82580er)) 289 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 290 /* Set if part includes ASF firmware */ 291 mac->asf_firmware_present = TRUE; 292 /* Set if manageability features are enabled. */ 293 mac->arc_subsystem_valid = 294 (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK) 295 ? TRUE : FALSE; 296 297 /* Function pointers */ 298 299 /* bus type/speed/width */ 300 mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; 301 /* reset */ 302 if ((mac->type == e1000_82580) || (mac->type == e1000_82580er)) 303 mac->ops.reset_hw = e1000_reset_hw_82580; 304 else 305 mac->ops.reset_hw = e1000_reset_hw_82575; 306 /* hw initialization */ 307 mac->ops.init_hw = e1000_init_hw_82575; 308 /* link setup */ 309 mac->ops.setup_link = e1000_setup_link_generic; 310 /* physical interface link setup */ 311 mac->ops.setup_physical_interface = 312 (hw->phy.media_type == e1000_media_type_copper) 313 ? 
e1000_setup_copper_link_82575 314 : e1000_setup_serdes_link_82575; 315 /* physical interface shutdown */ 316 mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; 317 /* check for link */ 318 mac->ops.check_for_link = e1000_check_for_link_82575; 319 /* receive address register setting */ 320 mac->ops.rar_set = e1000_rar_set_generic; 321 /* read mac address */ 322 mac->ops.read_mac_addr = e1000_read_mac_addr_82575; 323 /* multicast address update */ 324 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; 325 /* writing VFTA */ 326 mac->ops.write_vfta = e1000_write_vfta_generic; 327 /* clearing VFTA */ 328 mac->ops.clear_vfta = e1000_clear_vfta_generic; 329 /* setting MTA */ 330 mac->ops.mta_set = e1000_mta_set_generic; 331 /* ID LED init */ 332 mac->ops.id_led_init = e1000_id_led_init_generic; 333 /* blink LED */ 334 mac->ops.blink_led = e1000_blink_led_generic; 335 /* setup LED */ 336 mac->ops.setup_led = e1000_setup_led_generic; 337 /* cleanup LED */ 338 mac->ops.cleanup_led = e1000_cleanup_led_generic; 339 /* turn on/off LED */ 340 mac->ops.led_on = e1000_led_on_generic; 341 mac->ops.led_off = e1000_led_off_generic; 342 /* clear hardware counters */ 343 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; 344 /* link info */ 345 mac->ops.get_link_up_info = e1000_get_link_up_info_82575; 346 347 /* set lan id for port to determine which phy lock to use */ 348 hw->mac.ops.set_lan_id(hw); 349 350 return E1000_SUCCESS; 351 } 352 353 /** 354 * e1000_init_function_pointers_82575 - Init func ptrs. 355 * @hw: pointer to the HW structure 356 * 357 * Called to initialize all function pointers and parameters. 358 **/ 359 void e1000_init_function_pointers_82575(struct e1000_hw *hw) 360 { 361 DEBUGFUNC("e1000_init_function_pointers_82575"); 362 363 hw->mac.ops.init_params = e1000_init_mac_params_82575; 364 hw->nvm.ops.init_params = e1000_init_nvm_params_82575; 365 hw->phy.ops.init_params = e1000_init_phy_params_82575; 366 } 367 368 /** 369 * e1000_acquire_phy_82575 - Acquire rights to access PHY 370 * @hw: pointer to the HW structure 371 * 372 * Acquire access rights to the correct PHY. 373 **/ 374 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) 375 { 376 u16 mask = E1000_SWFW_PHY0_SM; 377 378 DEBUGFUNC("e1000_acquire_phy_82575"); 379 380 if (hw->bus.func == E1000_FUNC_1) 381 mask = E1000_SWFW_PHY1_SM; 382 else if (hw->bus.func == E1000_FUNC_2) 383 mask = E1000_SWFW_PHY2_SM; 384 else if (hw->bus.func == E1000_FUNC_3) 385 mask = E1000_SWFW_PHY3_SM; 386 387 return e1000_acquire_swfw_sync_82575(hw, mask); 388 } 389 390 /** 391 * e1000_release_phy_82575 - Release rights to access PHY 392 * @hw: pointer to the HW structure 393 * 394 * A wrapper to release access rights to the correct PHY. 395 **/ 396 static void e1000_release_phy_82575(struct e1000_hw *hw) 397 { 398 u16 mask = E1000_SWFW_PHY0_SM; 399 400 DEBUGFUNC("e1000_release_phy_82575"); 401 402 if (hw->bus.func == E1000_FUNC_1) 403 mask = E1000_SWFW_PHY1_SM; 404 else if (hw->bus.func == E1000_FUNC_2) 405 mask = E1000_SWFW_PHY2_SM; 406 else if (hw->bus.func == E1000_FUNC_3) 407 mask = E1000_SWFW_PHY3_SM; 408 409 e1000_release_swfw_sync_82575(hw, mask); 410 } 411 412 /** 413 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii 414 * @hw: pointer to the HW structure 415 * @offset: register offset to be read 416 * @data: pointer to the read data 417 * 418 * Reads the PHY register at offset using the serial gigabit media independent 419 * interface and stores the retrieved information in data. 
 **/
static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
                                          u16 *data)
{
        s32 ret_val = -E1000_ERR_PARAM;

        DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");

        if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
                DEBUGOUT1("PHY Address %u is out of range\n", offset);
                goto out;
        }

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                goto out;

        ret_val = e1000_read_phy_reg_i2c(hw, offset, data);

        hw->phy.ops.release(hw);

out:
        return ret_val;
}

/**
 *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Writes the data to PHY register at the offset using the serial gigabit
 *  media independent interface.
 **/
static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
                                           u16 data)
{
        s32 ret_val = -E1000_ERR_PARAM;

        DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");

        if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
                DEBUGOUT1("PHY Address %d is out of range\n", offset);
                goto out;
        }

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                goto out;

        ret_val = e1000_write_phy_reg_i2c(hw, offset, data);

        hw->phy.ops.release(hw);

out:
        return ret_val;
}

/**
 *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
 *  @hw: pointer to the HW structure
 *
 *  Retrieves the PHY address and ID for both PHYs that do and do not use
 *  the sgmii interface.
 **/
static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val = E1000_SUCCESS;
        u16 phy_id;
        u32 ctrl_ext;

        DEBUGFUNC("e1000_get_phy_id_82575");

        /*
         * For SGMII PHYs, we try the list of possible addresses until
         * we find one that works.  For non-SGMII PHYs
         * (e.g. integrated copper PHYs), an address of 1 should
         * work.  The result of this function should mean phy->phy_addr
         * and phy->id are set correctly.
         */
        if (!e1000_sgmii_active_82575(hw)) {
                phy->addr = 1;
                ret_val = e1000_get_phy_id(hw);
                goto out;
        }

        /* Power on sgmii phy if it is disabled */
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        E1000_WRITE_REG(hw, E1000_CTRL_EXT,
                        ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
        E1000_WRITE_FLUSH(hw);
        msec_delay(300);

        /*
         * The address field in the I2CCMD register is 3 bits and 0 is invalid.
         * Therefore, we need to test 1-7
         */
        for (phy->addr = 1; phy->addr < 8; phy->addr++) {
                ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
                if (ret_val == E1000_SUCCESS) {
                        DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
                                  phy_id, phy->addr);
                        /*
                         * At the time of this writing, the M88 part is
                         * the only supported SGMII PHY product.
                         */
                        if (phy_id == M88_VENDOR)
                                break;
                } else {
                        DEBUGOUT1("PHY address %u was unreadable\n",
                                  phy->addr);
                }
        }

        /* A valid PHY type couldn't be found.
 */
        if (phy->addr == 8) {
                phy->addr = 0;
                ret_val = -E1000_ERR_PHY;
        } else {
                ret_val = e1000_get_phy_id(hw);
        }

        /* restore previous sfp cage power state */
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

out:
        return ret_val;
}

/**
 *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Resets the PHY using the serial gigabit media independent interface.
 **/
static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
        s32 ret_val = E1000_SUCCESS;

        DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");

        /*
         * This isn't a TRUE "hard" reset, but is the only reset
         * available to us at this time.
         */

        DEBUGOUT("Soft resetting SGMII attached PHY...\n");

        if (!(hw->phy.ops.write_reg))
                goto out;

        /*
         * SFP documentation requires the following to configure the SFP module
         * to work on SGMII.  No further documentation is given.
         */
        ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
        if (ret_val)
                goto out;

        ret_val = hw->phy.ops.commit(hw);

out:
        return ret_val;
}

/**
 *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: TRUE to enable LPLU, FALSE to disable
 *
 *  Sets the LPLU D0 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val = E1000_SUCCESS;
        u16 data;

        DEBUGFUNC("e1000_set_d0_lplu_state_82575");

        if (!(hw->phy.ops.read_reg))
                goto out;

        ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
        if (ret_val)
                goto out;

        if (active) {
                data |= IGP02E1000_PM_D0_LPLU;
                ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                             data);
                if (ret_val)
                        goto out;

                /* When LPLU is enabled, we should disable SmartSpeed */
                ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                            &data);
                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
                ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                             data);
                if (ret_val)
                        goto out;
        } else {
                data &= ~IGP02E1000_PM_D0_LPLU;
                ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                             data);
                /*
                 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
                 * during Dx states where the power conservation is most
                 * important.  During driver activity we should enable
                 * SmartSpeed, so performance is maintained.
639 */ 640 if (phy->smart_speed == e1000_smart_speed_on) { 641 ret_val = phy->ops.read_reg(hw, 642 IGP01E1000_PHY_PORT_CONFIG, 643 &data); 644 if (ret_val) 645 goto out; 646 647 data |= IGP01E1000_PSCFR_SMART_SPEED; 648 ret_val = phy->ops.write_reg(hw, 649 IGP01E1000_PHY_PORT_CONFIG, 650 data); 651 if (ret_val) 652 goto out; 653 } else if (phy->smart_speed == e1000_smart_speed_off) { 654 ret_val = phy->ops.read_reg(hw, 655 IGP01E1000_PHY_PORT_CONFIG, 656 &data); 657 if (ret_val) 658 goto out; 659 660 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 661 ret_val = phy->ops.write_reg(hw, 662 IGP01E1000_PHY_PORT_CONFIG, 663 data); 664 if (ret_val) 665 goto out; 666 } 667 } 668 669 out: 670 return ret_val; 671 } 672 673 /** 674 * e1000_acquire_nvm_82575 - Request for access to EEPROM 675 * @hw: pointer to the HW structure 676 * 677 * Acquire the necessary semaphores for exclusive access to the EEPROM. 678 * Set the EEPROM access request bit and wait for EEPROM access grant bit. 679 * Return successful if access grant bit set, else clear the request for 680 * EEPROM access and return -E1000_ERR_NVM (-1). 681 **/ 682 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) 683 { 684 s32 ret_val; 685 686 DEBUGFUNC("e1000_acquire_nvm_82575"); 687 688 ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 689 if (ret_val) 690 goto out; 691 692 ret_val = e1000_acquire_nvm_generic(hw); 693 694 if (ret_val) 695 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 696 697 out: 698 return ret_val; 699 } 700 701 /** 702 * e1000_release_nvm_82575 - Release exclusive access to EEPROM 703 * @hw: pointer to the HW structure 704 * 705 * Stop any current commands to the EEPROM and clear the EEPROM request bit, 706 * then release the semaphores acquired. 707 **/ 708 static void e1000_release_nvm_82575(struct e1000_hw *hw) 709 { 710 DEBUGFUNC("e1000_release_nvm_82575"); 711 712 e1000_release_nvm_generic(hw); 713 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 714 } 715 716 /** 717 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore 718 * @hw: pointer to the HW structure 719 * @mask: specifies which semaphore to acquire 720 * 721 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask 722 * will also specify which port we're acquiring the lock for. 
723 **/ 724 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) 725 { 726 u32 swfw_sync; 727 u32 swmask = mask; 728 u32 fwmask = mask << 16; 729 s32 ret_val = E1000_SUCCESS; 730 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ 731 732 DEBUGFUNC("e1000_acquire_swfw_sync_82575"); 733 734 while (i < timeout) { 735 if (e1000_get_hw_semaphore_generic(hw)) { 736 ret_val = -E1000_ERR_SWFW_SYNC; 737 goto out; 738 } 739 740 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); 741 if (!(swfw_sync & (fwmask | swmask))) 742 break; 743 744 /* 745 * Firmware currently using resource (fwmask) 746 * or other software thread using resource (swmask) 747 */ 748 e1000_put_hw_semaphore_generic(hw); 749 msec_delay_irq(5); 750 i++; 751 } 752 753 if (i == timeout) { 754 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); 755 ret_val = -E1000_ERR_SWFW_SYNC; 756 goto out; 757 } 758 759 swfw_sync |= swmask; 760 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); 761 762 e1000_put_hw_semaphore_generic(hw); 763 764 out: 765 return ret_val; 766 } 767 768 /** 769 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore 770 * @hw: pointer to the HW structure 771 * @mask: specifies which semaphore to acquire 772 * 773 * Release the SW/FW semaphore used to access the PHY or NVM. The mask 774 * will also specify which port we're releasing the lock for. 775 **/ 776 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) 777 { 778 u32 swfw_sync; 779 780 DEBUGFUNC("e1000_release_swfw_sync_82575"); 781 782 while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS); 783 /* Empty */ 784 785 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); 786 swfw_sync &= ~mask; 787 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); 788 789 e1000_put_hw_semaphore_generic(hw); 790 } 791 792 /** 793 * e1000_get_cfg_done_82575 - Read config done bit 794 * @hw: pointer to the HW structure 795 * 796 * Read the management control register for the config done bit for 797 * completion status. NOTE: silicon which is EEPROM-less will fail trying 798 * to read the config done bit, so an error is *ONLY* logged and returns 799 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon 800 * would not be able to be reset or change link. 
801 **/ 802 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) 803 { 804 s32 timeout = PHY_CFG_TIMEOUT; 805 s32 ret_val = E1000_SUCCESS; 806 u32 mask = E1000_NVM_CFG_DONE_PORT_0; 807 808 DEBUGFUNC("e1000_get_cfg_done_82575"); 809 810 if (hw->bus.func == E1000_FUNC_1) 811 mask = E1000_NVM_CFG_DONE_PORT_1; 812 else if (hw->bus.func == E1000_FUNC_2) 813 mask = E1000_NVM_CFG_DONE_PORT_2; 814 else if (hw->bus.func == E1000_FUNC_3) 815 mask = E1000_NVM_CFG_DONE_PORT_3; 816 while (timeout) { 817 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) 818 break; 819 msec_delay(1); 820 timeout--; 821 } 822 if (!timeout) 823 DEBUGOUT("MNG configuration cycle has not completed.\n"); 824 825 /* If EEPROM is not marked present, init the PHY manually */ 826 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) && 827 (hw->phy.type == e1000_phy_igp_3)) 828 e1000_phy_init_script_igp3(hw); 829 830 return ret_val; 831 } 832 833 /** 834 * e1000_get_link_up_info_82575 - Get link speed/duplex info 835 * @hw: pointer to the HW structure 836 * @speed: stores the current speed 837 * @duplex: stores the current duplex 838 * 839 * This is a wrapper function, if using the serial gigabit media independent 840 * interface, use PCS to retrieve the link speed and duplex information. 841 * Otherwise, use the generic function to get the link speed and duplex info. 842 **/ 843 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, 844 u16 *duplex) 845 { 846 s32 ret_val; 847 848 DEBUGFUNC("e1000_get_link_up_info_82575"); 849 850 if (hw->phy.media_type != e1000_media_type_copper) 851 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, 852 duplex); 853 else 854 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, 855 duplex); 856 857 return ret_val; 858 } 859 860 /** 861 * e1000_check_for_link_82575 - Check for link 862 * @hw: pointer to the HW structure 863 * 864 * If sgmii is enabled, then use the pcs register to determine link, otherwise 865 * use the generic interface for determining link. 866 **/ 867 static s32 e1000_check_for_link_82575(struct e1000_hw *hw) 868 { 869 s32 ret_val; 870 u16 speed, duplex; 871 872 DEBUGFUNC("e1000_check_for_link_82575"); 873 874 if (hw->phy.media_type != e1000_media_type_copper) { 875 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, 876 &duplex); 877 /* 878 * Use this flag to determine if link needs to be checked or 879 * not. If we have link clear the flag so that we do not 880 * continue to check for link. 881 */ 882 hw->mac.get_link_status = !hw->mac.serdes_has_link; 883 } else { 884 ret_val = e1000_check_for_copper_link_generic(hw); 885 } 886 887 return ret_val; 888 } 889 890 /** 891 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex 892 * @hw: pointer to the HW structure 893 * @speed: stores the current speed 894 * @duplex: stores the current duplex 895 * 896 * Using the physical coding sub-layer (PCS), retrieve the current speed and 897 * duplex, then store the values in the pointers provided. 898 **/ 899 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, 900 u16 *speed, u16 *duplex) 901 { 902 struct e1000_mac_info *mac = &hw->mac; 903 u32 pcs; 904 905 DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); 906 907 /* Set up defaults for the return values of this function */ 908 mac->serdes_has_link = FALSE; 909 *speed = 0; 910 *duplex = 0; 911 912 /* 913 * Read the PCS Status register for link state. For non-copper mode, 914 * the status register is not accurate. 
The PCS status register is 915 * used instead. 916 */ 917 pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); 918 919 /* 920 * The link up bit determines when link is up on autoneg. The sync ok 921 * gets set once both sides sync up and agree upon link. Stable link 922 * can be determined by checking for both link up and link sync ok 923 */ 924 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { 925 mac->serdes_has_link = TRUE; 926 927 /* Detect and store PCS speed */ 928 if (pcs & E1000_PCS_LSTS_SPEED_1000) { 929 *speed = SPEED_1000; 930 } else if (pcs & E1000_PCS_LSTS_SPEED_100) { 931 *speed = SPEED_100; 932 } else { 933 *speed = SPEED_10; 934 } 935 936 /* Detect and store PCS duplex */ 937 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { 938 *duplex = FULL_DUPLEX; 939 } else { 940 *duplex = HALF_DUPLEX; 941 } 942 } 943 944 return E1000_SUCCESS; 945 } 946 947 /** 948 * e1000_shutdown_serdes_link_82575 - Remove link during power down 949 * @hw: pointer to the HW structure 950 * 951 * In the case of serdes shut down sfp and PCS on driver unload 952 * when management pass thru is not enabled. 953 **/ 954 void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) 955 { 956 u32 reg; 957 u16 eeprom_data = 0; 958 959 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 960 !e1000_sgmii_active_82575(hw)) 961 return; 962 963 if (hw->bus.func == E1000_FUNC_0) 964 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 965 else if ((hw->mac.type == e1000_82580) || 966 (hw->mac.type == e1000_82580er)) 967 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 968 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 969 &eeprom_data); 970 else if (hw->bus.func == E1000_FUNC_1) 971 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 972 973 /* 974 * If APM is not enabled in the EEPROM and management interface is 975 * not enabled, then power down. 976 */ 977 if (!(eeprom_data & E1000_NVM_APME_82575) && 978 !e1000_enable_mng_pass_thru(hw)) { 979 /* Disable PCS to turn off link */ 980 reg = E1000_READ_REG(hw, E1000_PCS_CFG0); 981 reg &= ~E1000_PCS_CFG_PCS_EN; 982 E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); 983 984 /* shutdown the laser */ 985 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 986 reg |= E1000_CTRL_EXT_SDP3_DATA; 987 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 988 989 /* flush the write to verify completion */ 990 E1000_WRITE_FLUSH(hw); 991 msec_delay(1); 992 } 993 994 return; 995 } 996 997 /** 998 * e1000_reset_hw_82575 - Reset hardware 999 * @hw: pointer to the HW structure 1000 * 1001 * This resets the hardware into a known state. 1002 **/ 1003 static s32 e1000_reset_hw_82575(struct e1000_hw *hw) 1004 { 1005 u32 ctrl, icr; 1006 s32 ret_val; 1007 1008 DEBUGFUNC("e1000_reset_hw_82575"); 1009 1010 /* 1011 * Prevent the PCI-E bus from sticking if there is no TLP connection 1012 * on the last TLP read/write transaction when MAC is reset. 
1013 */ 1014 ret_val = e1000_disable_pcie_master_generic(hw); 1015 if (ret_val) { 1016 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 1017 } 1018 1019 /* set the completion timeout for interface */ 1020 ret_val = e1000_set_pcie_completion_timeout(hw); 1021 if (ret_val) { 1022 DEBUGOUT("PCI-E Set completion timeout has failed.\n"); 1023 } 1024 1025 DEBUGOUT("Masking off all interrupts\n"); 1026 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 1027 1028 E1000_WRITE_REG(hw, E1000_RCTL, 0); 1029 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); 1030 E1000_WRITE_FLUSH(hw); 1031 1032 msec_delay(10); 1033 1034 ctrl = E1000_READ_REG(hw, E1000_CTRL); 1035 1036 DEBUGOUT("Issuing a global reset to MAC\n"); 1037 E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); 1038 1039 ret_val = e1000_get_auto_rd_done_generic(hw); 1040 if (ret_val) { 1041 /* 1042 * When auto config read does not complete, do not 1043 * return with an error. This can happen in situations 1044 * where there is no eeprom and prevents getting link. 1045 */ 1046 DEBUGOUT("Auto Read Done did not complete\n"); 1047 } 1048 1049 /* If EEPROM is not present, run manual init scripts */ 1050 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) 1051 e1000_reset_init_script_82575(hw); 1052 1053 /* Clear any pending interrupt events. */ 1054 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 1055 icr = E1000_READ_REG(hw, E1000_ICR); 1056 1057 /* Install any alternate MAC address into RAR0 */ 1058 ret_val = e1000_check_alt_mac_addr_generic(hw); 1059 1060 return ret_val; 1061 } 1062 1063 /** 1064 * e1000_init_hw_82575 - Initialize hardware 1065 * @hw: pointer to the HW structure 1066 * 1067 * This inits the hardware readying it for operation. 1068 **/ 1069 static s32 e1000_init_hw_82575(struct e1000_hw *hw) 1070 { 1071 struct e1000_mac_info *mac = &hw->mac; 1072 s32 ret_val; 1073 u16 i, rar_count = mac->rar_entry_count; 1074 1075 DEBUGFUNC("e1000_init_hw_82575"); 1076 1077 /* Initialize identification LED */ 1078 ret_val = mac->ops.id_led_init(hw); 1079 if (ret_val) { 1080 DEBUGOUT("Error initializing identification LED\n"); 1081 /* This is not fatal and we should not stop init due to this */ 1082 } 1083 1084 /* Disabling VLAN filtering */ 1085 DEBUGOUT("Initializing the IEEE VLAN\n"); 1086 mac->ops.clear_vfta(hw); 1087 1088 /* Setup the receive address */ 1089 e1000_init_rx_addrs_generic(hw, rar_count); 1090 1091 /* Zero out the Multicast HASH table */ 1092 DEBUGOUT("Zeroing the MTA\n"); 1093 for (i = 0; i < mac->mta_reg_count; i++) 1094 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 1095 1096 /* Zero out the Unicast HASH table */ 1097 DEBUGOUT("Zeroing the UTA\n"); 1098 for (i = 0; i < mac->uta_reg_count; i++) 1099 E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); 1100 1101 /* Setup link and flow control */ 1102 ret_val = mac->ops.setup_link(hw); 1103 1104 /* 1105 * Clear all of the statistics registers (clear on read). It is 1106 * important that we do this after we have tried to establish link 1107 * because the symbol error count will increment wildly if there 1108 * is no link. 1109 */ 1110 e1000_clear_hw_cntrs_82575(hw); 1111 1112 return ret_val; 1113 } 1114 1115 /** 1116 * e1000_setup_copper_link_82575 - Configure copper link settings 1117 * @hw: pointer to the HW structure 1118 * 1119 * Configures the link for auto-neg or forced speed and duplex. Then we check 1120 * for link, once link is established calls to configure collision distance 1121 * and flow control are called. 
1122 **/ 1123 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) 1124 { 1125 u32 ctrl; 1126 s32 ret_val; 1127 1128 DEBUGFUNC("e1000_setup_copper_link_82575"); 1129 1130 ctrl = E1000_READ_REG(hw, E1000_CTRL); 1131 ctrl |= E1000_CTRL_SLU; 1132 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1133 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 1134 1135 ret_val = e1000_setup_serdes_link_82575(hw); 1136 if (ret_val) 1137 goto out; 1138 1139 if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) { 1140 /* allow time for SFP cage time to power up phy */ 1141 msec_delay(300); 1142 1143 ret_val = hw->phy.ops.reset(hw); 1144 if (ret_val) { 1145 DEBUGOUT("Error resetting the PHY.\n"); 1146 goto out; 1147 } 1148 } 1149 switch (hw->phy.type) { 1150 case e1000_phy_m88: 1151 ret_val = e1000_copper_link_setup_m88(hw); 1152 break; 1153 case e1000_phy_igp_3: 1154 ret_val = e1000_copper_link_setup_igp(hw); 1155 break; 1156 case e1000_phy_82580: 1157 ret_val = e1000_copper_link_setup_82577(hw); 1158 break; 1159 default: 1160 ret_val = -E1000_ERR_PHY; 1161 break; 1162 } 1163 1164 if (ret_val) 1165 goto out; 1166 1167 ret_val = e1000_setup_copper_link_generic(hw); 1168 out: 1169 return ret_val; 1170 } 1171 1172 /** 1173 * e1000_setup_serdes_link_82575 - Setup link for serdes 1174 * @hw: pointer to the HW structure 1175 * 1176 * Configure the physical coding sub-layer (PCS) link. The PCS link is 1177 * used on copper connections where the serialized gigabit media independent 1178 * interface (sgmii), or serdes fiber is being used. Configures the link 1179 * for auto-negotiation or forces speed/duplex. 1180 **/ 1181 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) 1182 { 1183 u32 ctrl_ext, ctrl_reg, reg; 1184 bool pcs_autoneg; 1185 1186 DEBUGFUNC("e1000_setup_serdes_link_82575"); 1187 1188 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1189 !e1000_sgmii_active_82575(hw)) 1190 return E1000_SUCCESS; 1191 1192 /* 1193 * On the 82575, SerDes loopback mode persists until it is 1194 * explicitly turned off or a power cycle is performed. A read to 1195 * the register does not indicate its status. Therefore, we ensure 1196 * loopback mode is disabled during initialization. 
1197 */ 1198 E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1199 1200 /* power on the sfp cage if present */ 1201 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 1202 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1203 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 1204 1205 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); 1206 ctrl_reg |= E1000_CTRL_SLU; 1207 1208 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { 1209 /* set both sw defined pins */ 1210 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; 1211 1212 /* Set switch control to serdes energy detect */ 1213 reg = E1000_READ_REG(hw, E1000_CONNSW); 1214 reg |= E1000_CONNSW_ENRGSRC; 1215 E1000_WRITE_REG(hw, E1000_CONNSW, reg); 1216 } 1217 1218 reg = E1000_READ_REG(hw, E1000_PCS_LCTL); 1219 1220 /* default pcs_autoneg to the same setting as mac autoneg */ 1221 pcs_autoneg = hw->mac.autoneg; 1222 1223 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 1224 case E1000_CTRL_EXT_LINK_MODE_SGMII: 1225 /* sgmii mode lets the phy handle forcing speed/duplex */ 1226 pcs_autoneg = TRUE; 1227 /* autoneg time out should be disabled for SGMII mode */ 1228 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); 1229 break; 1230 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 1231 /* disable PCS autoneg and support parallel detect only */ 1232 pcs_autoneg = FALSE; 1233 default: 1234 /* 1235 * non-SGMII modes only supports a speed of 1000/Full for the 1236 * link so it is best to just force the MAC and let the pcs 1237 * link either autoneg or be forced to 1000/Full 1238 */ 1239 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1240 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1241 1242 /* set speed of 1000/Full if speed/duplex is forced */ 1243 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1244 break; 1245 } 1246 1247 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); 1248 1249 /* 1250 * New SerDes mode allows for forcing speed or autonegotiating speed 1251 * at 1gb. Autoneg should be default set by most drivers. This is the 1252 * mode that will be compatible with older link partners and switches. 1253 * However, both are supported by the hardware and some drivers/tools. 1254 */ 1255 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1256 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1257 1258 /* 1259 * We force flow control to prevent the CTRL register values from being 1260 * overwritten by the autonegotiated flow control values 1261 */ 1262 reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1263 1264 if (pcs_autoneg) { 1265 /* Set PCS register for autoneg */ 1266 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1267 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1268 DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1269 } else { 1270 /* Set PCS register for forced link */ 1271 reg |= E1000_PCS_LCTL_FSD | /* Force Speed */ 1272 E1000_PCS_LCTL_FORCE_LINK | /* Force Link */ 1273 E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */ 1274 1275 DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1276 } 1277 1278 E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); 1279 1280 if (!e1000_sgmii_active_82575(hw)) 1281 e1000_force_mac_fc_generic(hw); 1282 1283 return E1000_SUCCESS; 1284 } 1285 1286 /** 1287 * e1000_valid_led_default_82575 - Verify a valid default LED config 1288 * @hw: pointer to the HW structure 1289 * @data: pointer to the NVM (EEPROM) 1290 * 1291 * Read the EEPROM for the current default LED configuration. If the 1292 * LED configuration is not valid, set to a valid LED configuration. 
1293 **/ 1294 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) 1295 { 1296 s32 ret_val; 1297 1298 DEBUGFUNC("e1000_valid_led_default_82575"); 1299 1300 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); 1301 if (ret_val) { 1302 DEBUGOUT("NVM Read Error\n"); 1303 goto out; 1304 } 1305 1306 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { 1307 switch(hw->phy.media_type) { 1308 case e1000_media_type_internal_serdes: 1309 *data = ID_LED_DEFAULT_82575_SERDES; 1310 break; 1311 case e1000_media_type_copper: 1312 default: 1313 *data = ID_LED_DEFAULT; 1314 break; 1315 } 1316 } 1317 out: 1318 return ret_val; 1319 } 1320 1321 /** 1322 * e1000_sgmii_active_82575 - Return sgmii state 1323 * @hw: pointer to the HW structure 1324 * 1325 * 82575 silicon has a serialized gigabit media independent interface (sgmii) 1326 * which can be enabled for use in the embedded applications. Simply 1327 * return the current state of the sgmii interface. 1328 **/ 1329 static bool e1000_sgmii_active_82575(struct e1000_hw *hw) 1330 { 1331 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 1332 return dev_spec->sgmii_active; 1333 } 1334 1335 /** 1336 * e1000_reset_init_script_82575 - Inits HW defaults after reset 1337 * @hw: pointer to the HW structure 1338 * 1339 * Inits recommended HW defaults after a reset when there is no EEPROM 1340 * detected. This is only for the 82575. 1341 **/ 1342 static s32 e1000_reset_init_script_82575(struct e1000_hw* hw) 1343 { 1344 DEBUGFUNC("e1000_reset_init_script_82575"); 1345 1346 if (hw->mac.type == e1000_82575) { 1347 DEBUGOUT("Running reset init script for 82575\n"); 1348 /* SerDes configuration via SERDESCTRL */ 1349 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); 1350 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); 1351 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); 1352 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); 1353 1354 /* CCM configuration via CCMCTL register */ 1355 e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); 1356 e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); 1357 1358 /* PCIe lanes configuration */ 1359 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); 1360 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); 1361 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); 1362 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); 1363 1364 /* PCIe PLL Configuration */ 1365 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); 1366 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); 1367 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); 1368 } 1369 1370 return E1000_SUCCESS; 1371 } 1372 1373 /** 1374 * e1000_read_mac_addr_82575 - Read device MAC address 1375 * @hw: pointer to the HW structure 1376 **/ 1377 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) 1378 { 1379 s32 ret_val = E1000_SUCCESS; 1380 1381 DEBUGFUNC("e1000_read_mac_addr_82575"); 1382 1383 /* 1384 * If there's an alternate MAC address place it in RAR0 1385 * so that it will override the Si installed default perm 1386 * address. 
1387 */ 1388 ret_val = e1000_check_alt_mac_addr_generic(hw); 1389 if (ret_val) 1390 goto out; 1391 1392 ret_val = e1000_read_mac_addr_generic(hw); 1393 1394 out: 1395 return ret_val; 1396 } 1397 1398 /** 1399 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down 1400 * @hw: pointer to the HW structure 1401 * 1402 * In the case of a PHY power down to save power, or to turn off link during a 1403 * driver unload, or wake on lan is not enabled, remove the link. 1404 **/ 1405 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) 1406 { 1407 struct e1000_phy_info *phy = &hw->phy; 1408 struct e1000_mac_info *mac = &hw->mac; 1409 1410 if (!(phy->ops.check_reset_block)) 1411 return; 1412 1413 /* If the management interface is not enabled, then power down */ 1414 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) 1415 e1000_power_down_phy_copper(hw); 1416 1417 return; 1418 } 1419 1420 /** 1421 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters 1422 * @hw: pointer to the HW structure 1423 * 1424 * Clears the hardware counters by reading the counter registers. 1425 **/ 1426 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) 1427 { 1428 DEBUGFUNC("e1000_clear_hw_cntrs_82575"); 1429 1430 e1000_clear_hw_cntrs_base_generic(hw); 1431 1432 E1000_READ_REG(hw, E1000_PRC64); 1433 E1000_READ_REG(hw, E1000_PRC127); 1434 E1000_READ_REG(hw, E1000_PRC255); 1435 E1000_READ_REG(hw, E1000_PRC511); 1436 E1000_READ_REG(hw, E1000_PRC1023); 1437 E1000_READ_REG(hw, E1000_PRC1522); 1438 E1000_READ_REG(hw, E1000_PTC64); 1439 E1000_READ_REG(hw, E1000_PTC127); 1440 E1000_READ_REG(hw, E1000_PTC255); 1441 E1000_READ_REG(hw, E1000_PTC511); 1442 E1000_READ_REG(hw, E1000_PTC1023); 1443 E1000_READ_REG(hw, E1000_PTC1522); 1444 1445 E1000_READ_REG(hw, E1000_ALGNERRC); 1446 E1000_READ_REG(hw, E1000_RXERRC); 1447 E1000_READ_REG(hw, E1000_TNCRS); 1448 E1000_READ_REG(hw, E1000_CEXTERR); 1449 E1000_READ_REG(hw, E1000_TSCTC); 1450 E1000_READ_REG(hw, E1000_TSCTFC); 1451 1452 E1000_READ_REG(hw, E1000_MGTPRC); 1453 E1000_READ_REG(hw, E1000_MGTPDC); 1454 E1000_READ_REG(hw, E1000_MGTPTC); 1455 1456 E1000_READ_REG(hw, E1000_IAC); 1457 E1000_READ_REG(hw, E1000_ICRXOC); 1458 1459 E1000_READ_REG(hw, E1000_ICRXPTC); 1460 E1000_READ_REG(hw, E1000_ICRXATC); 1461 E1000_READ_REG(hw, E1000_ICTXPTC); 1462 E1000_READ_REG(hw, E1000_ICTXATC); 1463 E1000_READ_REG(hw, E1000_ICTXQEC); 1464 E1000_READ_REG(hw, E1000_ICTXQMTC); 1465 E1000_READ_REG(hw, E1000_ICRXDMTC); 1466 1467 E1000_READ_REG(hw, E1000_CBTMPC); 1468 E1000_READ_REG(hw, E1000_HTDPMC); 1469 E1000_READ_REG(hw, E1000_CBRMPC); 1470 E1000_READ_REG(hw, E1000_RPTHC); 1471 E1000_READ_REG(hw, E1000_HGPTC); 1472 E1000_READ_REG(hw, E1000_HTCBDPC); 1473 E1000_READ_REG(hw, E1000_HGORCL); 1474 E1000_READ_REG(hw, E1000_HGORCH); 1475 E1000_READ_REG(hw, E1000_HGOTCL); 1476 E1000_READ_REG(hw, E1000_HGOTCH); 1477 E1000_READ_REG(hw, E1000_LENERRS); 1478 1479 /* This register should not be read in copper configurations */ 1480 if ((hw->phy.media_type == e1000_media_type_internal_serdes) || 1481 e1000_sgmii_active_82575(hw)) 1482 E1000_READ_REG(hw, E1000_SCVPC); 1483 } 1484 1485 /** 1486 * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable 1487 * @hw: pointer to the HW structure 1488 * 1489 * After rx enable if managability is enabled then there is likely some 1490 * bad data at the start of the fifo and possibly in the DMA fifo. This 1491 * function clears the fifos and flushes any packets that came in as rx was 1492 * being enabled. 
1493 **/ 1494 void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) 1495 { 1496 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; 1497 int i, ms_wait; 1498 1499 DEBUGFUNC("e1000_rx_fifo_workaround_82575"); 1500 if (hw->mac.type != e1000_82575 || 1501 !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) 1502 return; 1503 1504 /* Disable all RX queues */ 1505 for (i = 0; i < 4; i++) { 1506 rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); 1507 E1000_WRITE_REG(hw, E1000_RXDCTL(i), 1508 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); 1509 } 1510 /* Poll all queues to verify they have shut down */ 1511 for (ms_wait = 0; ms_wait < 10; ms_wait++) { 1512 msec_delay(1); 1513 rx_enabled = 0; 1514 for (i = 0; i < 4; i++) 1515 rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); 1516 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) 1517 break; 1518 } 1519 1520 if (ms_wait == 10) 1521 DEBUGOUT("Queue disable timed out after 10ms\n"); 1522 1523 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all 1524 * incoming packets are rejected. Set enable and wait 2ms so that 1525 * any packet that was coming in as RCTL.EN was set is flushed 1526 */ 1527 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 1528 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); 1529 1530 rlpml = E1000_READ_REG(hw, E1000_RLPML); 1531 E1000_WRITE_REG(hw, E1000_RLPML, 0); 1532 1533 rctl = E1000_READ_REG(hw, E1000_RCTL); 1534 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); 1535 temp_rctl |= E1000_RCTL_LPE; 1536 1537 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); 1538 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); 1539 E1000_WRITE_FLUSH(hw); 1540 msec_delay(2); 1541 1542 /* Enable RX queues that were previously enabled and restore our 1543 * previous state 1544 */ 1545 for (i = 0; i < 4; i++) 1546 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); 1547 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 1548 E1000_WRITE_FLUSH(hw); 1549 1550 E1000_WRITE_REG(hw, E1000_RLPML, rlpml); 1551 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); 1552 1553 /* Flush receive errors generated by workaround */ 1554 E1000_READ_REG(hw, E1000_ROC); 1555 E1000_READ_REG(hw, E1000_RNBC); 1556 E1000_READ_REG(hw, E1000_MPC); 1557 } 1558 1559 /** 1560 * e1000_set_pcie_completion_timeout - set pci-e completion timeout 1561 * @hw: pointer to the HW structure 1562 * 1563 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, 1564 * however the hardware default for these parts is 500us to 1ms which is less 1565 * than the 10ms recommended by the pci-e spec. To address this we need to 1566 * increase the value to either 10ms to 200ms for capability version 1 config, 1567 * or 16ms to 55ms for version 2. 
1568 **/ 1569 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) 1570 { 1571 u32 gcr = E1000_READ_REG(hw, E1000_GCR); 1572 s32 ret_val = E1000_SUCCESS; 1573 u16 pcie_devctl2; 1574 1575 /* only take action if timeout value is defaulted to 0 */ 1576 if (gcr & E1000_GCR_CMPL_TMOUT_MASK) 1577 goto out; 1578 1579 /* 1580 * if capababilities version is type 1 we can write the 1581 * timeout of 10ms to 200ms through the GCR register 1582 */ 1583 if (!(gcr & E1000_GCR_CAP_VER2)) { 1584 gcr |= E1000_GCR_CMPL_TMOUT_10ms; 1585 goto out; 1586 } 1587 1588 /* 1589 * for version 2 capabilities we need to write the config space 1590 * directly in order to set the completion timeout value for 1591 * 16ms to 55ms 1592 */ 1593 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 1594 &pcie_devctl2); 1595 if (ret_val) 1596 goto out; 1597 1598 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; 1599 1600 ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 1601 &pcie_devctl2); 1602 out: 1603 /* disable completion timeout resend */ 1604 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; 1605 1606 E1000_WRITE_REG(hw, E1000_GCR, gcr); 1607 return ret_val; 1608 } 1609 1610 /** 1611 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback 1612 * @hw: pointer to the hardware struct 1613 * @enable: state to enter, either enabled or disabled 1614 * 1615 * enables/disables L2 switch loopback functionality. 1616 **/ 1617 void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) 1618 { 1619 u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); 1620 1621 if (enable) 1622 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1623 else 1624 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1625 1626 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); 1627 } 1628 1629 /** 1630 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication 1631 * @hw: pointer to the hardware struct 1632 * @enable: state to enter, either enabled or disabled 1633 * 1634 * enables/disables replication of packets across multiple pools. 1635 **/ 1636 void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 1637 { 1638 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); 1639 1640 if (enable) 1641 vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 1642 else 1643 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 1644 1645 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); 1646 } 1647 1648 /** 1649 * e1000_read_phy_reg_82580 - Read 82580 MDI control register 1650 * @hw: pointer to the HW structure 1651 * @offset: register offset to be read 1652 * @data: pointer to the read data 1653 * 1654 * Reads the MDI control register in the PHY at offset and stores the 1655 * information read to data. 1656 **/ 1657 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 1658 { 1659 u32 mdicnfg = 0; 1660 s32 ret_val; 1661 1662 DEBUGFUNC("e1000_read_phy_reg_82580"); 1663 1664 ret_val = hw->phy.ops.acquire(hw); 1665 if (ret_val) 1666 goto out; 1667 1668 /* 1669 * We config the phy address in MDICNFG register now. Same bits 1670 * as before. The values in MDIC can be written but will be 1671 * ignored. 
This allows us to call the old function after 1672 * configuring the PHY address in the new register 1673 */ 1674 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); 1675 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); 1676 1677 ret_val = e1000_read_phy_reg_mdic(hw, offset, data); 1678 1679 hw->phy.ops.release(hw); 1680 1681 out: 1682 return ret_val; 1683 } 1684 1685 /** 1686 * e1000_write_phy_reg_82580 - Write 82580 MDI control register 1687 * @hw: pointer to the HW structure 1688 * @offset: register offset to write to 1689 * @data: data to write to register at offset 1690 * 1691 * Writes data to MDI control register in the PHY at offset. 1692 **/ 1693 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 1694 { 1695 u32 mdicnfg = 0; 1696 s32 ret_val; 1697 1698 DEBUGFUNC("e1000_write_phy_reg_82580"); 1699 1700 ret_val = hw->phy.ops.acquire(hw); 1701 if (ret_val) 1702 goto out; 1703 1704 /* 1705 * We config the phy address in MDICNFG register now. Same bits 1706 * as before. The values in MDIC can be written but will be 1707 * ignored. This allows us to call the old function after 1708 * configuring the PHY address in the new register 1709 */ 1710 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); 1711 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); 1712 1713 ret_val = e1000_write_phy_reg_mdic(hw, offset, data); 1714 1715 hw->phy.ops.release(hw); 1716 1717 out: 1718 return ret_val; 1719 } 1720 /** 1721 * e1000_reset_hw_82580 - Reset hardware 1722 * @hw: pointer to the HW structure 1723 * 1724 * This resets function or entire device (all ports, etc.) 1725 * to a known state. 1726 **/ 1727 static s32 e1000_reset_hw_82580(struct e1000_hw *hw) 1728 { 1729 s32 ret_val = E1000_SUCCESS; 1730 /* BH SW mailbox bit in SW_FW_SYNC */ 1731 u16 swmbsw_mask = E1000_SW_SYNCH_MB; 1732 u32 ctrl, icr; 1733 bool global_device_reset = hw->dev_spec._82575.global_device_reset; 1734 1735 DEBUGFUNC("e1000_reset_hw_82580"); 1736 1737 hw->dev_spec._82575.global_device_reset = FALSE; 1738 1739 /* Get current control state. */ 1740 ctrl = E1000_READ_REG(hw, E1000_CTRL); 1741 1742 /* 1743 * Prevent the PCI-E bus from sticking if there is no TLP connection 1744 * on the last TLP read/write transaction when MAC is reset. 1745 */ 1746 ret_val = e1000_disable_pcie_master_generic(hw); 1747 if (ret_val) 1748 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 1749 1750 DEBUGOUT("Masking off all interrupts\n"); 1751 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 1752 E1000_WRITE_REG(hw, E1000_RCTL, 0); 1753 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); 1754 E1000_WRITE_FLUSH(hw); 1755 1756 msec_delay(10); 1757 1758 /* Determine whether or not a global dev reset is requested */ 1759 if (global_device_reset && 1760 e1000_acquire_swfw_sync_82575(hw, swmbsw_mask)) 1761 global_device_reset = FALSE; 1762 1763 if (global_device_reset && 1764 !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET)) 1765 ctrl |= E1000_CTRL_DEV_RST; 1766 else 1767 ctrl |= E1000_CTRL_RST; 1768 1769 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 1770 1771 /* Add delay to insure DEV_RST has time to complete */ 1772 if (global_device_reset) 1773 msec_delay(5); 1774 1775 ret_val = e1000_get_auto_rd_done_generic(hw); 1776 if (ret_val) { 1777 /* 1778 * When auto config read does not complete, do not 1779 * return with an error. This can happen in situations 1780 * where there is no eeprom and prevents getting link. 
1781 */ 1782 DEBUGOUT("Auto Read Done did not complete\n"); 1783 } 1784 1785 /* If EEPROM is not present, run manual init scripts */ 1786 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) 1787 e1000_reset_init_script_82575(hw); 1788 1789 /* clear global device reset status bit */ 1790 E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); 1791 1792 /* Clear any pending interrupt events. */ 1793 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 1794 icr = E1000_READ_REG(hw, E1000_ICR); 1795 1796 /* Install any alternate MAC address into RAR0 */ 1797 ret_val = e1000_check_alt_mac_addr_generic(hw); 1798 1799 /* Release semaphore */ 1800 if (global_device_reset) 1801 e1000_release_swfw_sync_82575(hw, swmbsw_mask); 1802 1803 return ret_val; 1804 } 1805 1806 /** 1807 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size 1808 * @data: data received by reading RXPBS register 1809 * 1810 * The 82580 uses a table based approach for packet buffer allocation sizes. 1811 * This function converts the retrieved value into the correct table value 1812 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 1813 * 0x0 36 72 144 1 2 4 8 16 1814 * 0x8 35 70 140 rsv rsv rsv rsv rsv 1815 */ 1816 u16 e1000_rxpbs_adjust_82580(u32 data) 1817 { 1818 u16 ret_val = 0; 1819 1820 if (data < E1000_82580_RXPBS_TABLE_SIZE) 1821 ret_val = e1000_82580_rxpbs_table[data]; 1822 1823 return ret_val; 1824 } 1825 /** 1826 * e1000_erfuse_check_82580 - ER Fuse check 1827 * @hw: pointer to the HW structure 1828 * 1829 * This function returns the status of the ER Fuse 1830 **/ 1831 s32 e1000_erfuse_check_82580(struct e1000_hw *hw) 1832 { 1833 s32 ret_val = E1000_SUCCESS; 1834 s32 ufuse_reg; 1835 1836 ufuse_reg = E1000_READ_REG(hw, E1000_UFUSE); 1837 if ((ufuse_reg & E1000_ERFUSE) == E1000_ERFUSE) 1838 ret_val = E1000_ERFUSE_FAILURE; 1839 1840 return ret_val; 1841 } 1842