1 /****************************************************************************** 2 3 Copyright (c) 2001-2010, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 /* 36 * 82562G 10/100 Network Connection 37 * 82562G-2 10/100 Network Connection 38 * 82562GT 10/100 Network Connection 39 * 82562GT-2 10/100 Network Connection 40 * 82562V 10/100 Network Connection 41 * 82562V-2 10/100 Network Connection 42 * 82566DC-2 Gigabit Network Connection 43 * 82566DC Gigabit Network Connection 44 * 82566DM-2 Gigabit Network Connection 45 * 82566DM Gigabit Network Connection 46 * 82566MC Gigabit Network Connection 47 * 82566MM Gigabit Network Connection 48 * 82567LM Gigabit Network Connection 49 * 82567LF Gigabit Network Connection 50 * 82567V Gigabit Network Connection 51 * 82567LM-2 Gigabit Network Connection 52 * 82567LF-2 Gigabit Network Connection 53 * 82567V-2 Gigabit Network Connection 54 * 82567LF-3 Gigabit Network Connection 55 * 82567LM-3 Gigabit Network Connection 56 * 82567LM-4 Gigabit Network Connection 57 * 82577LM Gigabit Network Connection 58 * 82577LC Gigabit Network Connection 59 * 82578DM Gigabit Network Connection 60 * 82578DC Gigabit Network Connection 61 * 82579LM Gigabit Network Connection 62 * 82579V Gigabit Network Connection 63 */ 64 65 #include "e1000_api.h" 66 67 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw); 68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw); 69 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw); 70 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw); 71 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); 72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw); 73 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); 74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw); 75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); 78 static void 
e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, 79 u8 *mc_addr_list, 80 u32 mc_addr_count); 81 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw); 82 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); 83 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); 84 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, 85 bool active); 86 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, 87 bool active); 88 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, 89 u16 words, u16 *data); 90 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, 91 u16 words, u16 *data); 92 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw); 93 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw); 94 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, 95 u16 *data); 96 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); 97 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw); 98 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw); 99 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw); 100 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); 101 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); 102 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, 103 u16 *speed, u16 *duplex); 104 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); 105 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); 106 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); 107 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); 108 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); 109 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); 110 static s32 e1000_led_on_pchlan(struct e1000_hw *hw); 111 static s32 e1000_led_off_pchlan(struct e1000_hw *hw); 112 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); 113 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); 
114 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout); 115 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw); 116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); 117 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); 118 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, 119 u32 offset, u8 *data); 120 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 121 u8 size, u16 *data); 122 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, 123 u32 offset, u16 *data); 124 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 125 u32 offset, u8 byte); 126 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, 127 u32 offset, u8 data); 128 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 129 u8 size, u16 data); 130 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); 131 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); 132 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw); 133 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); 134 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); 135 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 136 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 137 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 138 139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 140 /* Offset 04h HSFSTS */ 141 union ich8_hws_flash_status { 142 struct ich8_hsfsts { 143 u16 flcdone :1; /* bit 0 Flash Cycle Done */ 144 u16 flcerr :1; /* bit 1 Flash Cycle Error */ 145 u16 dael :1; /* bit 2 Direct Access error Log */ 146 u16 berasesz :2; /* bit 4:3 Sector Erase Size */ 147 u16 flcinprog :1; /* bit 5 flash cycle in Progress */ 148 u16 reserved1 :2; /* bit 13:6 Reserved */ 149 u16 reserved2 :6; /* bit 13:6 Reserved */ 150 u16 fldesvalid :1; /* bit 14 Flash 
Descriptor Valid */ 151 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ 152 } hsf_status; 153 u16 regval; 154 }; 155 156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ 157 /* Offset 06h FLCTL */ 158 union ich8_hws_flash_ctrl { 159 struct ich8_hsflctl { 160 u16 flcgo :1; /* 0 Flash Cycle Go */ 161 u16 flcycle :2; /* 2:1 Flash Cycle */ 162 u16 reserved :5; /* 7:3 Reserved */ 163 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ 164 u16 flockdn :6; /* 15:10 Reserved */ 165 } hsf_ctrl; 166 u16 regval; 167 }; 168 169 /* ICH Flash Region Access Permissions */ 170 union ich8_hws_flash_regacc { 171 struct ich8_flracc { 172 u32 grra :8; /* 0:7 GbE region Read Access */ 173 u32 grwa :8; /* 8:15 GbE region Write Access */ 174 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ 175 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ 176 } hsf_flregacc; 177 u16 regval; 178 }; 179 180 /** 181 * e1000_init_phy_params_pchlan - Initialize PHY function pointers 182 * @hw: pointer to the HW structure 183 * 184 * Initialize family-specific PHY parameters and function pointers. 
185 **/ 186 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 187 { 188 struct e1000_phy_info *phy = &hw->phy; 189 u32 ctrl, fwsm; 190 s32 ret_val = E1000_SUCCESS; 191 192 DEBUGFUNC("e1000_init_phy_params_pchlan"); 193 194 phy->addr = 1; 195 phy->reset_delay_us = 100; 196 197 phy->ops.acquire = e1000_acquire_swflag_ich8lan; 198 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; 199 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; 200 phy->ops.read_reg = e1000_read_phy_reg_hv; 201 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; 202 phy->ops.release = e1000_release_swflag_ich8lan; 203 phy->ops.reset = e1000_phy_hw_reset_ich8lan; 204 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; 205 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; 206 phy->ops.write_reg = e1000_write_phy_reg_hv; 207 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; 208 phy->ops.power_up = e1000_power_up_phy_copper; 209 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 210 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 211 212 /* 213 * The MAC-PHY interconnect may still be in SMBus mode 214 * after Sx->S0. If the manageability engine (ME) is 215 * disabled, then toggle the LANPHYPC Value bit to force 216 * the interconnect to PCIe mode. 217 */ 218 fwsm = E1000_READ_REG(hw, E1000_FWSM); 219 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && 220 !(hw->phy.ops.check_reset_block(hw))) { 221 ctrl = E1000_READ_REG(hw, E1000_CTRL); 222 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 223 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 224 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 225 usec_delay(10); 226 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 227 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 228 msec_delay(50); 229 230 /* 231 * Gate automatic PHY configuration by hardware on 232 * non-managed 82579 233 */ 234 if (hw->mac.type == e1000_pch2lan) 235 e1000_gate_hw_phy_config_ich8lan(hw, TRUE); 236 } 237 238 /* 239 * Reset the PHY before any acccess to it. 
Doing so, ensures that 240 * the PHY is in a known good state before we read/write PHY registers. 241 * The generic reset is sufficient here, because we haven't determined 242 * the PHY type yet. 243 */ 244 ret_val = e1000_phy_hw_reset_generic(hw); 245 if (ret_val) 246 goto out; 247 248 /* Ungate automatic PHY configuration on non-managed 82579 */ 249 if ((hw->mac.type == e1000_pch2lan) && 250 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 251 msec_delay(10); 252 e1000_gate_hw_phy_config_ich8lan(hw, FALSE); 253 } 254 255 phy->id = e1000_phy_unknown; 256 switch (hw->mac.type) { 257 default: 258 ret_val = e1000_get_phy_id(hw); 259 if (ret_val) 260 goto out; 261 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) 262 break; 263 /* fall-through */ 264 case e1000_pch2lan: 265 /* 266 * In case the PHY needs to be in mdio slow mode, 267 * set slow mode and try to get the PHY id again. 268 */ 269 ret_val = e1000_set_mdio_slow_mode_hv(hw); 270 if (ret_val) 271 goto out; 272 ret_val = e1000_get_phy_id(hw); 273 if (ret_val) 274 goto out; 275 break; 276 } 277 phy->type = e1000_get_phy_type_from_id(phy->id); 278 279 switch (phy->type) { 280 case e1000_phy_82577: 281 case e1000_phy_82579: 282 phy->ops.check_polarity = e1000_check_polarity_82577; 283 phy->ops.force_speed_duplex = 284 e1000_phy_force_speed_duplex_82577; 285 phy->ops.get_cable_length = e1000_get_cable_length_82577; 286 phy->ops.get_info = e1000_get_phy_info_82577; 287 phy->ops.commit = e1000_phy_sw_reset_generic; 288 break; 289 case e1000_phy_82578: 290 phy->ops.check_polarity = e1000_check_polarity_m88; 291 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; 292 phy->ops.get_cable_length = e1000_get_cable_length_m88; 293 phy->ops.get_info = e1000_get_phy_info_m88; 294 break; 295 default: 296 ret_val = -E1000_ERR_PHY; 297 break; 298 } 299 300 out: 301 return ret_val; 302 } 303 304 /** 305 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers 306 * @hw: pointer to the HW structure 307 * 308 * 
Initialize family-specific PHY parameters and function pointers. 309 **/ 310 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) 311 { 312 struct e1000_phy_info *phy = &hw->phy; 313 s32 ret_val = E1000_SUCCESS; 314 u16 i = 0; 315 316 DEBUGFUNC("e1000_init_phy_params_ich8lan"); 317 318 phy->addr = 1; 319 phy->reset_delay_us = 100; 320 321 phy->ops.acquire = e1000_acquire_swflag_ich8lan; 322 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; 323 phy->ops.get_cable_length = e1000_get_cable_length_igp_2; 324 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; 325 phy->ops.read_reg = e1000_read_phy_reg_igp; 326 phy->ops.release = e1000_release_swflag_ich8lan; 327 phy->ops.reset = e1000_phy_hw_reset_ich8lan; 328 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan; 329 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan; 330 phy->ops.write_reg = e1000_write_phy_reg_igp; 331 phy->ops.power_up = e1000_power_up_phy_copper; 332 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 333 334 /* 335 * We may need to do this twice - once for IGP and if that fails, 336 * we'll set BM func pointers and try again 337 */ 338 ret_val = e1000_determine_phy_address(hw); 339 if (ret_val) { 340 phy->ops.write_reg = e1000_write_phy_reg_bm; 341 phy->ops.read_reg = e1000_read_phy_reg_bm; 342 ret_val = e1000_determine_phy_address(hw); 343 if (ret_val) { 344 DEBUGOUT("Cannot determine PHY addr. 
Erroring out\n"); 345 goto out; 346 } 347 } 348 349 phy->id = 0; 350 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) && 351 (i++ < 100)) { 352 msec_delay(1); 353 ret_val = e1000_get_phy_id(hw); 354 if (ret_val) 355 goto out; 356 } 357 358 /* Verify phy id */ 359 switch (phy->id) { 360 case IGP03E1000_E_PHY_ID: 361 phy->type = e1000_phy_igp_3; 362 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 363 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked; 364 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked; 365 phy->ops.get_info = e1000_get_phy_info_igp; 366 phy->ops.check_polarity = e1000_check_polarity_igp; 367 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; 368 break; 369 case IFE_E_PHY_ID: 370 case IFE_PLUS_E_PHY_ID: 371 case IFE_C_E_PHY_ID: 372 phy->type = e1000_phy_ife; 373 phy->autoneg_mask = E1000_ALL_NOT_GIG; 374 phy->ops.get_info = e1000_get_phy_info_ife; 375 phy->ops.check_polarity = e1000_check_polarity_ife; 376 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; 377 break; 378 case BME1000_E_PHY_ID: 379 phy->type = e1000_phy_bm; 380 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 381 phy->ops.read_reg = e1000_read_phy_reg_bm; 382 phy->ops.write_reg = e1000_write_phy_reg_bm; 383 phy->ops.commit = e1000_phy_sw_reset_generic; 384 phy->ops.get_info = e1000_get_phy_info_m88; 385 phy->ops.check_polarity = e1000_check_polarity_m88; 386 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; 387 break; 388 default: 389 ret_val = -E1000_ERR_PHY; 390 goto out; 391 } 392 393 out: 394 return ret_val; 395 } 396 397 /** 398 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers 399 * @hw: pointer to the HW structure 400 * 401 * Initialize family-specific NVM parameters and function 402 * pointers. 
403 **/ 404 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) 405 { 406 struct e1000_nvm_info *nvm = &hw->nvm; 407 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 408 u32 gfpreg, sector_base_addr, sector_end_addr; 409 s32 ret_val = E1000_SUCCESS; 410 u16 i; 411 412 DEBUGFUNC("e1000_init_nvm_params_ich8lan"); 413 414 /* Can't read flash registers if the register set isn't mapped. */ 415 if (!hw->flash_address) { 416 DEBUGOUT("ERROR: Flash registers not mapped\n"); 417 ret_val = -E1000_ERR_CONFIG; 418 goto out; 419 } 420 421 nvm->type = e1000_nvm_flash_sw; 422 423 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); 424 425 /* 426 * sector_X_addr is a "sector"-aligned address (4096 bytes) 427 * Add 1 to sector_end_addr since this sector is included in 428 * the overall size. 429 */ 430 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; 431 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; 432 433 /* flash_base_addr is byte-aligned */ 434 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; 435 436 /* 437 * find total size of the NVM, then cut in half since the total 438 * size represents two separate NVM banks. 
439 */ 440 nvm->flash_bank_size = (sector_end_addr - sector_base_addr) 441 << FLASH_SECTOR_ADDR_SHIFT; 442 nvm->flash_bank_size /= 2; 443 /* Adjust to word count */ 444 nvm->flash_bank_size /= sizeof(u16); 445 446 nvm->word_size = E1000_SHADOW_RAM_WORDS; 447 448 /* Clear shadow ram */ 449 for (i = 0; i < nvm->word_size; i++) { 450 dev_spec->shadow_ram[i].modified = FALSE; 451 dev_spec->shadow_ram[i].value = 0xFFFF; 452 } 453 454 E1000_MUTEX_INIT(&dev_spec->nvm_mutex); 455 E1000_MUTEX_INIT(&dev_spec->swflag_mutex); 456 457 /* Function Pointers */ 458 nvm->ops.acquire = e1000_acquire_nvm_ich8lan; 459 nvm->ops.release = e1000_release_nvm_ich8lan; 460 nvm->ops.read = e1000_read_nvm_ich8lan; 461 nvm->ops.update = e1000_update_nvm_checksum_ich8lan; 462 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan; 463 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan; 464 nvm->ops.write = e1000_write_nvm_ich8lan; 465 466 out: 467 return ret_val; 468 } 469 470 /** 471 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers 472 * @hw: pointer to the HW structure 473 * 474 * Initialize family-specific MAC parameters and function 475 * pointers. 
476 **/ 477 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) 478 { 479 struct e1000_mac_info *mac = &hw->mac; 480 u16 pci_cfg; 481 482 DEBUGFUNC("e1000_init_mac_params_ich8lan"); 483 484 /* Set media type function pointer */ 485 hw->phy.media_type = e1000_media_type_copper; 486 487 /* Set mta register count */ 488 mac->mta_reg_count = 32; 489 /* Set rar entry count */ 490 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; 491 if (mac->type == e1000_ich8lan) 492 mac->rar_entry_count--; 493 /* Set if part includes ASF firmware */ 494 mac->asf_firmware_present = TRUE; 495 /* FWSM register */ 496 mac->has_fwsm = TRUE; 497 /* ARC subsystem not supported */ 498 mac->arc_subsystem_valid = FALSE; 499 /* Adaptive IFS supported */ 500 mac->adaptive_ifs = TRUE; 501 502 /* Function pointers */ 503 504 /* bus type/speed/width */ 505 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan; 506 /* function id */ 507 mac->ops.set_lan_id = e1000_set_lan_id_single_port; 508 /* reset */ 509 mac->ops.reset_hw = e1000_reset_hw_ich8lan; 510 /* hw initialization */ 511 mac->ops.init_hw = e1000_init_hw_ich8lan; 512 /* link setup */ 513 mac->ops.setup_link = e1000_setup_link_ich8lan; 514 /* physical interface setup */ 515 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan; 516 /* check for link */ 517 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan; 518 /* link info */ 519 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan; 520 /* multicast address update */ 521 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; 522 /* clear hardware counters */ 523 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan; 524 525 /* LED operations */ 526 switch (mac->type) { 527 case e1000_ich8lan: 528 case e1000_ich9lan: 529 case e1000_ich10lan: 530 /* check management mode */ 531 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; 532 /* ID LED init */ 533 mac->ops.id_led_init = e1000_id_led_init_generic; 534 /* blink LED */ 535 
mac->ops.blink_led = e1000_blink_led_generic; 536 /* setup LED */ 537 mac->ops.setup_led = e1000_setup_led_generic; 538 /* cleanup LED */ 539 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; 540 /* turn on/off LED */ 541 mac->ops.led_on = e1000_led_on_ich8lan; 542 mac->ops.led_off = e1000_led_off_ich8lan; 543 break; 544 case e1000_pch2lan: 545 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; 546 mac->ops.rar_set = e1000_rar_set_pch2lan; 547 /* multicast address update for pch2 */ 548 mac->ops.update_mc_addr_list = 549 e1000_update_mc_addr_list_pch2lan; 550 /* fall-through */ 551 case e1000_pchlan: 552 /* save PCH revision_id */ 553 e1000_read_pci_cfg(hw, 0x2, &pci_cfg); 554 hw->revision_id = (u8)(pci_cfg &= 0x000F); 555 /* check management mode */ 556 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; 557 /* ID LED init */ 558 mac->ops.id_led_init = e1000_id_led_init_pchlan; 559 /* setup LED */ 560 mac->ops.setup_led = e1000_setup_led_pchlan; 561 /* cleanup LED */ 562 mac->ops.cleanup_led = e1000_cleanup_led_pchlan; 563 /* turn on/off LED */ 564 mac->ops.led_on = e1000_led_on_pchlan; 565 mac->ops.led_off = e1000_led_off_pchlan; 566 break; 567 default: 568 break; 569 } 570 571 /* Enable PCS Lock-loss workaround for ICH8 */ 572 if (mac->type == e1000_ich8lan) 573 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE); 574 575 /* Gate automatic PHY configuration by hardware on managed 82579 */ 576 if ((mac->type == e1000_pch2lan) && 577 (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) 578 e1000_gate_hw_phy_config_ich8lan(hw, TRUE); 579 580 return E1000_SUCCESS; 581 } 582 583 /** 584 * e1000_set_eee_pchlan - Enable/disable EEE support 585 * @hw: pointer to the HW structure 586 * 587 * Enable/disable EEE based on setting in dev_spec structure. The bits in 588 * the LPI Control register will remain set only if/when link is up. 
589 **/ 590 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) 591 { 592 s32 ret_val = E1000_SUCCESS; 593 u16 phy_reg; 594 595 DEBUGFUNC("e1000_set_eee_pchlan"); 596 597 if (hw->phy.type != e1000_phy_82579) 598 goto out; 599 600 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg); 601 if (ret_val) 602 goto out; 603 604 if (hw->dev_spec.ich8lan.eee_disable) 605 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; 606 else 607 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; 608 609 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg); 610 out: 611 return ret_val; 612 } 613 614 /** 615 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 616 * @hw: pointer to the HW structure 617 * 618 * Checks to see of the link status of the hardware has changed. If a 619 * change in link status has been detected, then we read the PHY registers 620 * to get the current speed/duplex if link exists. 621 **/ 622 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) 623 { 624 struct e1000_mac_info *mac = &hw->mac; 625 s32 ret_val; 626 bool link; 627 628 DEBUGFUNC("e1000_check_for_copper_link_ich8lan"); 629 630 /* 631 * We only want to go out to the PHY registers to see if Auto-Neg 632 * has completed and/or if our link status has changed. The 633 * get_link_status flag is set upon receiving a Link Status 634 * Change or Rx Sequence Error interrupt. 635 */ 636 if (!mac->get_link_status) { 637 ret_val = E1000_SUCCESS; 638 goto out; 639 } 640 641 /* 642 * First we want to see if the MII Status Register reports 643 * link. If so, then we want to get the current speed/duplex 644 * of the PHY. 
645 */ 646 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); 647 if (ret_val) 648 goto out; 649 650 if (hw->mac.type == e1000_pchlan) { 651 ret_val = e1000_k1_gig_workaround_hv(hw, link); 652 if (ret_val) 653 goto out; 654 } 655 656 if (!link) 657 goto out; /* No link detected */ 658 659 mac->get_link_status = FALSE; 660 661 if (hw->phy.type == e1000_phy_82578) { 662 ret_val = e1000_link_stall_workaround_hv(hw); 663 if (ret_val) 664 goto out; 665 } 666 667 if (hw->mac.type == e1000_pch2lan) { 668 ret_val = e1000_k1_workaround_lv(hw); 669 if (ret_val) 670 goto out; 671 } 672 673 /* 674 * Check if there was DownShift, must be checked 675 * immediately after link-up 676 */ 677 e1000_check_downshift_generic(hw); 678 679 /* Enable/Disable EEE after link up */ 680 ret_val = e1000_set_eee_pchlan(hw); 681 if (ret_val) 682 goto out; 683 684 /* 685 * If we are forcing speed/duplex, then we simply return since 686 * we have already determined whether we have link or not. 687 */ 688 if (!mac->autoneg) { 689 ret_val = -E1000_ERR_CONFIG; 690 goto out; 691 } 692 693 /* 694 * Auto-Neg is enabled. Auto Speed Detection takes care 695 * of MAC speed/duplex configuration. So we only need to 696 * configure Collision Distance in the MAC. 697 */ 698 e1000_config_collision_dist_generic(hw); 699 700 /* 701 * Configure Flow Control now that Auto-Neg has completed. 702 * First, we need to restore the desired flow control 703 * settings because we may have had to re-autoneg with a 704 * different link partner. 705 */ 706 ret_val = e1000_config_fc_after_link_up_generic(hw); 707 if (ret_val) 708 DEBUGOUT("Error configuring flow control\n"); 709 710 out: 711 return ret_val; 712 } 713 714 /** 715 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers 716 * @hw: pointer to the HW structure 717 * 718 * Initialize family-specific function pointers for PHY, MAC, and NVM. 
719 **/ 720 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw) 721 { 722 DEBUGFUNC("e1000_init_function_pointers_ich8lan"); 723 724 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan; 725 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan; 726 switch (hw->mac.type) { 727 case e1000_ich8lan: 728 case e1000_ich9lan: 729 case e1000_ich10lan: 730 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan; 731 break; 732 case e1000_pchlan: 733 case e1000_pch2lan: 734 hw->phy.ops.init_params = e1000_init_phy_params_pchlan; 735 break; 736 default: 737 break; 738 } 739 } 740 741 /** 742 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex 743 * @hw: pointer to the HW structure 744 * 745 * Acquires the mutex for performing NVM operations. 746 **/ 747 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) 748 { 749 DEBUGFUNC("e1000_acquire_nvm_ich8lan"); 750 751 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex); 752 753 return E1000_SUCCESS; 754 } 755 756 /** 757 * e1000_release_nvm_ich8lan - Release NVM mutex 758 * @hw: pointer to the HW structure 759 * 760 * Releases the mutex used while performing NVM operations. 761 **/ 762 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) 763 { 764 DEBUGFUNC("e1000_release_nvm_ich8lan"); 765 766 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex); 767 768 return; 769 } 770 771 /** 772 * e1000_acquire_swflag_ich8lan - Acquire software control flag 773 * @hw: pointer to the HW structure 774 * 775 * Acquires the software control flag for performing PHY and select 776 * MAC CSR accesses. 
777 **/ 778 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) 779 { 780 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; 781 s32 ret_val = E1000_SUCCESS; 782 783 DEBUGFUNC("e1000_acquire_swflag_ich8lan"); 784 785 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex); 786 787 while (timeout) { 788 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 789 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) 790 break; 791 792 msec_delay_irq(1); 793 timeout--; 794 } 795 796 if (!timeout) { 797 DEBUGOUT("SW/FW/HW has locked the resource for too long.\n"); 798 ret_val = -E1000_ERR_CONFIG; 799 goto out; 800 } 801 802 timeout = SW_FLAG_TIMEOUT; 803 804 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 805 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 806 807 while (timeout) { 808 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 809 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 810 break; 811 812 msec_delay_irq(1); 813 timeout--; 814 } 815 816 if (!timeout) { 817 DEBUGOUT("Failed to acquire the semaphore.\n"); 818 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 819 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 820 ret_val = -E1000_ERR_CONFIG; 821 goto out; 822 } 823 824 out: 825 if (ret_val) 826 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); 827 828 return ret_val; 829 } 830 831 /** 832 * e1000_release_swflag_ich8lan - Release software control flag 833 * @hw: pointer to the HW structure 834 * 835 * Releases the software control flag for performing PHY and select 836 * MAC CSR accesses. 
837 **/ 838 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) 839 { 840 u32 extcnf_ctrl; 841 842 DEBUGFUNC("e1000_release_swflag_ich8lan"); 843 844 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 845 846 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { 847 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 848 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 849 } else { 850 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n"); 851 } 852 853 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); 854 855 return; 856 } 857 858 /** 859 * e1000_check_mng_mode_ich8lan - Checks management mode 860 * @hw: pointer to the HW structure 861 * 862 * This checks if the adapter has any manageability enabled. 863 * This is a function pointer entry point only called by read/write 864 * routines for the PHY and NVM parts. 865 **/ 866 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) 867 { 868 u32 fwsm; 869 870 DEBUGFUNC("e1000_check_mng_mode_ich8lan"); 871 872 fwsm = E1000_READ_REG(hw, E1000_FWSM); 873 874 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 875 ((fwsm & E1000_FWSM_MODE_MASK) == 876 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 877 } 878 879 /** 880 * e1000_check_mng_mode_pchlan - Checks management mode 881 * @hw: pointer to the HW structure 882 * 883 * This checks if the adapter has iAMT enabled. 884 * This is a function pointer entry point only called by read/write 885 * routines for the PHY and NVM parts. 
886 **/ 887 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) 888 { 889 u32 fwsm; 890 891 DEBUGFUNC("e1000_check_mng_mode_pchlan"); 892 893 fwsm = E1000_READ_REG(hw, E1000_FWSM); 894 895 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 896 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 897 } 898 899 /** 900 * e1000_rar_set_pch2lan - Set receive address register 901 * @hw: pointer to the HW structure 902 * @addr: pointer to the receive address 903 * @index: receive address array register 904 * 905 * Sets the receive address array register at index to the address passed 906 * in by addr. For 82579, RAR[0] is the base address register that is to 907 * contain the MAC address but RAR[1-6] are reserved for manageability (ME). 908 * Use SHRA[0-3] in place of those reserved for ME. 909 **/ 910 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) 911 { 912 u32 rar_low, rar_high; 913 914 DEBUGFUNC("e1000_rar_set_pch2lan"); 915 916 /* 917 * HW expects these in little endian so we reverse the byte order 918 * from network order (big endian) to little endian 919 */ 920 rar_low = ((u32) addr[0] | 921 ((u32) addr[1] << 8) | 922 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); 923 924 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 925 926 /* If MAC address zero, no need to set the AV bit */ 927 if (rar_low || rar_high) 928 rar_high |= E1000_RAH_AV; 929 930 if (index == 0) { 931 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); 932 E1000_WRITE_FLUSH(hw); 933 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); 934 E1000_WRITE_FLUSH(hw); 935 return; 936 } 937 938 if (index < hw->mac.rar_entry_count) { 939 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low); 940 E1000_WRITE_FLUSH(hw); 941 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high); 942 E1000_WRITE_FLUSH(hw); 943 944 /* verify the register updates */ 945 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) && 946 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high)) 947 
			return;

		/* Read-back mismatch: ME may own this SHRA entry. */
		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			  (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

	DEBUGOUT1("Failed to write receive address at index %d\n", index);
}

/**
 *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 **/
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
                                              u8 *mc_addr_list,
                                              u32 mc_addr_count)
{
	int i;

	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");

	/* Program the MAC MTA first (this also fills mta_shadow)... */
	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);

	/* ...then mirror each 32-bit shadow entry into the PHY BM_MTA
	 * registers, 16 bits at a time (low word, then high word).
	 */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg(hw, BM_MTA(i),
				      (u16)(hw->mac.mta_shadow[i] & 0xFFFF));
		hw->phy.ops.write_reg(hw, (BM_MTA(i) + 1),
				      (u16)((hw->mac.mta_shadow[i] >> 16) &
					    0xFFFF));
	}
}

/**
 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 *  @hw: pointer to the HW structure
 *
 *  Checks if firmware is blocking the reset of the PHY.
 *  This is a function pointer entry point only called by
 *  reset routines.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_reset_block_ich8lan");

	/* Honor an explicit software override first */
	if (hw->phy.reset_disable)
		return E1000_BLK_PHY_RESET;

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	/* Firmware must have set RSPCIPHY for a PHY reset to be allowed */
	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
	       : E1000_BLK_PHY_RESET;
}

/**
 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 *  @hw: pointer to the HW structure
 *
 *  Assumes semaphore already acquired.
 *
 **/
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
	s32 ret_val = E1000_SUCCESS;

	/* Extract the SMBus address from the strapping options... */
	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		goto out;

	/* ...and program it into the PHY, marking it valid with packet
	 * error checking (PEC) enabled.
	 */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
	ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);

out:
	return ret_val;
}

/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw: pointer to the HW structure
 *
 *  SW should configure the LCD from the NVM extended configuration region
 *  as a workaround for certain parts.
 **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = E1000_SUCCESS;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

	/*
	 * Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		/* On ICH8 only IGP3 PHYs need this workaround */
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Nothing to do unless the NVM says SW should do the config */
	data = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto out;

	/*
	 * Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if (!(hw->mac.type == e1000_pch2lan)) {
		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
			goto out;
	}

	/* Extended configuration region length, in words */
	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto out;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
	    (hw->mac.type == e1000_pchlan)) ||
	    (hw->mac.type == e1000_pch2lan)) {
		/*
		 * HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto out;

		/* Mirror the MAC LED configuration into the PHY */
		data = E1000_READ_REG(hw, E1000_LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto out;
	}

	/* Configure LCD from extended configuration region.
	 */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each entry in the region is a (data, address) word pair */
	for (i = 0; i < cnf_size; i++) {
		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
					   &reg_data);
		if (ret_val)
			goto out;

		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
					   1, &reg_addr);
		if (ret_val)
			goto out;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		/* Combine the most recent page with the register offset */
		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto out;
	}

out:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 *  e1000_k1_gig_workaround_hv - K1 Si workaround
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
 *  If link is down, the function will restore the default K1 setting located
 *  in the NVM.
 **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	/* Default comes from the NVM; may be overridden below */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	if (hw->mac.type != e1000_pchlan)
		goto out;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Keep only link/resolved/speed fields... */
			status_reg &= BM_CS_STATUS_LINK_UP |
				      BM_CS_STATUS_RESOLVED |
				      BM_CS_STATUS_SPEED_MASK;

			/* ...and disable K1 only for a resolved 1Gbps link */
			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = FALSE;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= HV_M_STATUS_LINK_UP |
				      HV_M_STATUS_AUTONEG_COMPLETE |
				      HV_M_STATUS_SPEED_MASK;

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = FALSE;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);
out:
	return ret_val;
}

/**
 *  e1000_configure_k1_ich8lan - Configure K1 power state
 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val = E1000_SUCCESS;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k1_ich8lan");

	/* Read-modify-write the K1 enable bit in the KMRN K1 config */
	ret_val = e1000_read_kmrn_reg_locked(hw,
					     E1000_KMRNCTRLSTA_K1_CONFIG,
					     &kmrn_reg);
	if (ret_val)
		goto out;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000_write_kmrn_reg_locked(hw,
					      E1000_KMRNCTRLSTA_K1_CONFIG,
					      kmrn_reg);
	if (ret_val)
		goto out;

	usec_delay(20);
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);

	/*
	 * Briefly force the speed with bypass enabled, then restore the
	 * original CTRL/CTRL_EXT values.  NOTE(review): presumably this
	 * forced-speed pulse is what makes the K1 change take effect -
	 * confirm against the 82577/82578 datasheet.
	 */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	usec_delay(20);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	usec_delay(20);

out:
	return ret_val;
}

/**
 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 *  @hw: pointer to the HW structure
 *  @d0_state: boolean if entering d0 or d3 device state
 *
 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
 **/
s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	DEBUGFUNC("e1000_oem_bits_config_ich8lan");

	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On PCH (not PCH2), HW owns the OEM bits if OEM write enable set */
	if (!(hw->mac.type == e1000_pch2lan)) {
		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto out;
	}

	/* SW config must be enabled in the NVM for SW to touch OEM bits */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto out;

	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto out;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	/* Mirror the MAC PHY_CTRL D0/non-D0 bits into the PHY OEM bits */
	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	}
	/* Restart auto-neg to activate the bits */
	if (!hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;
	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

out:
	hw->phy.ops.release(hw);

	return ret_val;
}


/**
 *  e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
 *  @hw: pointer to the HW structure
 **/
s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");

	/* Only 82577 (PCH) with revision id <= 2 needs this workaround */
	if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
		return E1000_SUCCESS;

	return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
}

/**
 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");

	/* Read-modify-write so only the slow-MDIO bit changes */
	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= HV_KMRN_MDIO_SLOW;

	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);

	return ret_val;
}

/**
 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_data;

	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");

	if (hw->mac.type != e1000_pchlan)
		goto out;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			goto out;
	}

	/* Hanksville M Phy init for IEEE.
	 */
	if ((hw->revision_id == 2) &&
	    (hw->phy.type == e1000_phy_82577) &&
	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
		/* HW-team-supplied register/value pairs.  NOTE(review):
		 * the repeated 0x10/0x11 writes look like an indirect
		 * address/data access pattern - confirm with the PHY
		 * datasheet; return values are deliberately not checked.
		 */
		hw->phy.ops.write_reg(hw, 0x10, 0x8823);
		hw->phy.ops.write_reg(hw, 0x11, 0x0018);
		hw->phy.ops.write_reg(hw, 0x10, 0x8824);
		hw->phy.ops.write_reg(hw, 0x11, 0x0016);
		hw->phy.ops.write_reg(hw, 0x10, 0x8825);
		hw->phy.ops.write_reg(hw, 0x11, 0x001A);
		hw->phy.ops.write_reg(hw, 0x10, 0x888C);
		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
		hw->phy.ops.write_reg(hw, 0x10, 0x888D);
		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
		hw->phy.ops.write_reg(hw, 0x10, 0x888E);
		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
		hw->phy.ops.write_reg(hw, 0x10, 0x8827);
		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
		hw->phy.ops.write_reg(hw, 0x10, 0x8835);
		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
		hw->phy.ops.write_reg(hw, 0x10, 0x8834);
		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
		hw->phy.ops.write_reg(hw, 0x10, 0x8833);
		hw->phy.ops.write_reg(hw, 0x11, 0x0002);
	}

	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			goto out;

		/* Preamble tuning for SSC */
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
		if (ret_val)
			goto out;
	}

	if (hw->phy.type == e1000_phy_82578) {
		if (hw->revision_id < 3) {
			/* PHY config */
			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
							0x66C0);
			if (ret_val)
				goto out;

			/* PHY config */
			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
							0xFFFF);
			if (ret_val)
				goto out;
		}

		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to
		 * the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000_phy_sw_reset_generic(hw);
			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
							0x3140);
		}
	}

	if ((hw->revision_id == 2) &&
	    (hw->phy.type == e1000_phy_82577) &&
	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
		/*
		 * Workaround for OEM (GbE) not operating after reset -
		 * restart AN (twice)
		 */
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
		if (ret_val)
			goto out;
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Page select is done with a raw MDIC write at PHY address 1 */
	hw->phy.addr = 1;
	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		goto out;

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
	if (ret_val)
		goto out;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;
	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG_REG,
					      &phy_data);
	if (ret_val)
		goto release;
	/* Clear the upper byte of the port general config register */
	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG_REG,
					       phy_data & 0x00FF);
release:
	hw->phy.ops.release(hw);
out:
	return ret_val;
}

/**
 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 *  @hw: pointer to the HW structure
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i;

	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
		hw->phy.ops.write_reg(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
		hw->phy.ops.write_reg(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
		/* 0x8000 keeps only bit 31 of RAH - presumably the AV
		 * (address valid) bit; confirm against E1000_RAH_AV.
		 */
		hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
	}
}

/**
 *  e1000_calc_rx_da_crc - Calculate CRC-32 of a MAC address
 *  @mac: six-byte MAC address
 *
 *  Bit-serial, reflected CRC-32 (polynomial 0xEDB88320) over the six
 *  address bytes; used below to seed the 82579 jumbo-frame workaround.
 **/
static u32 e1000_calc_rx_da_crc(u8 mac[])
{
	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
	u32 i, j, mask, crc;

	DEBUGFUNC("e1000_calc_rx_da_crc");

	crc = 0xffffffff;
	for (i = 0; i < 6; i++) {
		crc = crc ^ mac[i];
		for (j = 8; j > 0; j--) {
			/* mask is all-ones when the CRC's low bit is set */
			mask = (crc & 1) * (-1);
			crc = (crc >> 1) ^ (poly & mask);
		}
	}
	return ~crc;
}

/**
 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 *  @hw: pointer to the HW structure
 *  @enable: flag to
 *  enable/disable workaround when enabling/disabling jumbos
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");

	/* Only the 82579 (PCH2) needs this workaround */
	if (hw->mac.type != e1000_pch2lan)
		goto out;

	/* disable Rx path while enabling/disabling workaround */
	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
	if (ret_val)
		goto out;

	if (enable) {
		/*
		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
			u8 mac_addr[ETH_ADDR_LEN] = {0};
			u32 addr_high, addr_low;

			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
			/* Skip entries whose Address Valid bit is clear */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
			/* Unpack little-endian RAL/RAH into byte order */
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					e1000_calc_rx_da_crc(mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Strip the Ethernet CRC on receive (SECRC) */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			goto out;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			goto out;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			goto out;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			goto out;

		/* Enable jumbo frame workaround in the PHY */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			goto out;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			goto out;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x1A << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
		if (ret_val)
			goto out;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
		if (ret_val)
			goto out;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			goto out;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			goto out;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			goto out;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			goto out;

		/* Write PHY register values back to h/w defaults */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			goto out;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			goto out;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			goto out;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
		if (ret_val)
			goto out;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));

out:
	return ret_val;
}

/**
 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 **/
static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");

	/* 82579 (PCH2) only */
	if (hw->mac.type != e1000_pch2lan)
		goto out;

	/* Set MDIO slow mode before any other MDIO access */
	ret_val = e1000_set_mdio_slow_mode_hv(hw);

out:
	return ret_val;
}

/**
 *  e1000_k1_workaround_lv - K1 Si workaround
 *  @hw: pointer to the HW structure
 *
 *  Workaround to set the K1 beacon duration for 82579 parts
 **/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	u32 mac_reg;

	DEBUGFUNC("e1000_k1_workaround_lv");

	if (hw->mac.type != e1000_pch2lan)
		goto out;

	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		goto out;

	/* Only act when link is up AND auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;

		/* 8 usec at 1Gbps, 16 usec at lower speeds */
		if (status_reg & HV_M_STATUS_SPEED_1000)
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
		else
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;

		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
	}

out:
	return ret_val;
}

/**
 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
 *  @hw: pointer to the HW structure
 *  @gate: boolean set to TRUE to gate, FALSE to ungate
 *
 *  Gate/ungate the automatic PHY configuration via hardware; perform
 *  the configuration via software instead.
 **/
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
{
	u32 extcnf_ctrl;

	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");

	/* Gating only applies to the 82579 (PCH2) */
	if (hw->mac.type != e1000_pch2lan)
		return;

	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);

	if (gate)
		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
	else
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;

	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
	return;
}

/**
 *  e1000_hv_phy_tuning_workaround_ich8lan - This is a Phy tuning work around
 *  needed for Nahum3 + Hanksville testing, requested by HW team
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_hv_phy_tuning_workaround_ich8lan");

	/* Same register/value pairs used by the 82577/82578 preamble
	 * workarounds earlier in this file.
	 */
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
	if (ret_val)
		goto out;

	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
	if (ret_val)
		goto out;

	ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
	if (ret_val)
		goto out;

	ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);

out:
	return ret_val;
}

/**
 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
 *  @hw: pointer to the HW structure
 *
 *  Check the appropriate indication the MAC has finished configuring the
 *  PHY after a software reset.
 **/
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
{
	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;

	DEBUGFUNC("e1000_lan_init_done_ich8lan");

	/* Wait for basic configuration completes before proceeding */
	do {
		data = E1000_READ_REG(hw, E1000_STATUS);
		data &= E1000_STATUS_LAN_INIT_DONE;
		usec_delay(100);
	} while ((!data) && --loop);

	/*
	 * If basic configuration is incomplete before the above loop
	 * count reaches 0, loading the configuration from NVM will
	 * leave the PHY in a bad state possibly resulting in no link.
	 */
	if (loop == 0)
		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");

	/* Clear the Init Done bit for the next init event */
	data = E1000_READ_REG(hw, E1000_STATUS);
	data &= ~E1000_STATUS_LAN_INIT_DONE;
	E1000_WRITE_REG(hw, E1000_STATUS, data);
}

/**
 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	DEBUGFUNC("e1000_post_phy_reset_ich8lan");

	if (hw->phy.ops.check_reset_block(hw))
		goto out;

	/* Allow time for h/w to get to quiescent state after reset */
	msec_delay(10);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			goto out;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			goto out;
		break;
	default:
		break;
	}

	if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
		ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
		if (ret_val)
			goto out;
	}

	/* Dummy read to clear the phy wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan)
		hw->phy.ops.read_reg(hw, BM_WUC, &reg);

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		goto out;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			msec_delay(10);
			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
		}

		/* Set EEE LPI Update Timer to 200usec */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;
		/* Indirect EMI access: write the address, then the data */
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
						       I82579_LPI_UPDATE_TIMER);
		if (ret_val)
			goto release;
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       0x1387);
release:
		hw->phy.ops.release(hw);
	}

out:
	return ret_val;
}

/**
 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Resets the PHY
 *  This is a function pointer entry point called by drivers
 *  or other shared routines.
 **/
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");

	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	ret_val = e1000_phy_hw_reset_generic(hw);
	if (ret_val)
		goto out;

	/* Post-reset flow ungates the config again on non-managed parts */
	ret_val = e1000_post_phy_reset_ich8lan(hw);

out:
	return ret_val;
}

/**
 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
 *  @hw: pointer to the HW structure
 *  @active: TRUE to enable LPLU, FALSE to disable
 *
 *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
 *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
 *  the phy speed. This function will manually set the LPLU bit and restart
 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
 *  since it configures the same bit.
 **/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
	s32 ret_val = E1000_SUCCESS;
	u16 oem_reg;

	DEBUGFUNC("e1000_set_lplu_state_pchlan");

	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto out;

	if (active)
		oem_reg |= HV_OEM_BITS_LPLU;
	else
		oem_reg &= ~HV_OEM_BITS_LPLU;

	/* Restart auto-neg so the new LPLU setting takes effect */
	oem_reg |= HV_OEM_BITS_RESTART_AN;
	ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);

out:
	return ret_val;
}

/**
 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: TRUE to enable LPLU, FALSE to disable
 *
 *  Sets the LPLU D0 state according to the active flag.
When 2047 * activating LPLU this function also disables smart speed 2048 * and vice versa. LPLU will not be activated unless the 2049 * device autonegotiation advertisement meets standards of 2050 * either 10 or 10/100 or 10/100/1000 at all duplexes. 2051 * This is a function pointer entry point only called by 2052 * PHY setup routines. 2053 **/ 2054 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 2055 { 2056 struct e1000_phy_info *phy = &hw->phy; 2057 u32 phy_ctrl; 2058 s32 ret_val = E1000_SUCCESS; 2059 u16 data; 2060 2061 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); 2062 2063 if (phy->type == e1000_phy_ife) 2064 goto out; 2065 2066 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 2067 2068 if (active) { 2069 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 2070 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 2071 2072 if (phy->type != e1000_phy_igp_3) 2073 goto out; 2074 2075 /* 2076 * Call gig speed drop workaround on LPLU before accessing 2077 * any PHY registers 2078 */ 2079 if (hw->mac.type == e1000_ich8lan) 2080 e1000_gig_downshift_workaround_ich8lan(hw); 2081 2082 /* When LPLU is enabled, we should disable SmartSpeed */ 2083 ret_val = phy->ops.read_reg(hw, 2084 IGP01E1000_PHY_PORT_CONFIG, 2085 &data); 2086 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 2087 ret_val = phy->ops.write_reg(hw, 2088 IGP01E1000_PHY_PORT_CONFIG, 2089 data); 2090 if (ret_val) 2091 goto out; 2092 } else { 2093 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 2094 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 2095 2096 if (phy->type != e1000_phy_igp_3) 2097 goto out; 2098 2099 /* 2100 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 2101 * during Dx states where the power conservation is most 2102 * important. During driver activity we should enable 2103 * SmartSpeed, so performance is maintained. 
2104 */ 2105 if (phy->smart_speed == e1000_smart_speed_on) { 2106 ret_val = phy->ops.read_reg(hw, 2107 IGP01E1000_PHY_PORT_CONFIG, 2108 &data); 2109 if (ret_val) 2110 goto out; 2111 2112 data |= IGP01E1000_PSCFR_SMART_SPEED; 2113 ret_val = phy->ops.write_reg(hw, 2114 IGP01E1000_PHY_PORT_CONFIG, 2115 data); 2116 if (ret_val) 2117 goto out; 2118 } else if (phy->smart_speed == e1000_smart_speed_off) { 2119 ret_val = phy->ops.read_reg(hw, 2120 IGP01E1000_PHY_PORT_CONFIG, 2121 &data); 2122 if (ret_val) 2123 goto out; 2124 2125 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 2126 ret_val = phy->ops.write_reg(hw, 2127 IGP01E1000_PHY_PORT_CONFIG, 2128 data); 2129 if (ret_val) 2130 goto out; 2131 } 2132 } 2133 2134 out: 2135 return ret_val; 2136 } 2137 2138 /** 2139 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state 2140 * @hw: pointer to the HW structure 2141 * @active: TRUE to enable LPLU, FALSE to disable 2142 * 2143 * Sets the LPLU D3 state according to the active flag. When 2144 * activating LPLU this function also disables smart speed 2145 * and vice versa. LPLU will not be activated unless the 2146 * device autonegotiation advertisement meets standards of 2147 * either 10 or 10/100 or 10/100/1000 at all duplexes. 2148 * This is a function pointer entry point only called by 2149 * PHY setup routines. 2150 **/ 2151 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 2152 { 2153 struct e1000_phy_info *phy = &hw->phy; 2154 u32 phy_ctrl; 2155 s32 ret_val = E1000_SUCCESS; 2156 u16 data; 2157 2158 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); 2159 2160 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 2161 2162 if (!active) { 2163 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 2164 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 2165 2166 if (phy->type != e1000_phy_igp_3) 2167 goto out; 2168 2169 /* 2170 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 2171 * during Dx states where the power conservation is most 2172 * important. 
During driver activity we should enable 2173 * SmartSpeed, so performance is maintained. 2174 */ 2175 if (phy->smart_speed == e1000_smart_speed_on) { 2176 ret_val = phy->ops.read_reg(hw, 2177 IGP01E1000_PHY_PORT_CONFIG, 2178 &data); 2179 if (ret_val) 2180 goto out; 2181 2182 data |= IGP01E1000_PSCFR_SMART_SPEED; 2183 ret_val = phy->ops.write_reg(hw, 2184 IGP01E1000_PHY_PORT_CONFIG, 2185 data); 2186 if (ret_val) 2187 goto out; 2188 } else if (phy->smart_speed == e1000_smart_speed_off) { 2189 ret_val = phy->ops.read_reg(hw, 2190 IGP01E1000_PHY_PORT_CONFIG, 2191 &data); 2192 if (ret_val) 2193 goto out; 2194 2195 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 2196 ret_val = phy->ops.write_reg(hw, 2197 IGP01E1000_PHY_PORT_CONFIG, 2198 data); 2199 if (ret_val) 2200 goto out; 2201 } 2202 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || 2203 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 2204 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 2205 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 2206 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 2207 2208 if (phy->type != e1000_phy_igp_3) 2209 goto out; 2210 2211 /* 2212 * Call gig speed drop workaround on LPLU before accessing 2213 * any PHY registers 2214 */ 2215 if (hw->mac.type == e1000_ich8lan) 2216 e1000_gig_downshift_workaround_ich8lan(hw); 2217 2218 /* When LPLU is enabled, we should disable SmartSpeed */ 2219 ret_val = phy->ops.read_reg(hw, 2220 IGP01E1000_PHY_PORT_CONFIG, 2221 &data); 2222 if (ret_val) 2223 goto out; 2224 2225 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 2226 ret_val = phy->ops.write_reg(hw, 2227 IGP01E1000_PHY_PORT_CONFIG, 2228 data); 2229 } 2230 2231 out: 2232 return ret_val; 2233 } 2234 2235 /** 2236 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 2237 * @hw: pointer to the HW structure 2238 * @bank: pointer to the variable that returns the active bank 2239 * 2240 * Reads signature byte from the NVM using the flash access registers. 
 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Bank 1 starts one full bank (in bytes) after bank 0 */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* Byte offset of the high byte of the signature word (0x13) */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* On ICH8/9 the EEC register may report the valid bank */
		eecd = E1000_READ_REG(hw, E1000_EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			goto out;
		}
		DEBUGOUT("Unable to determine valid NVM bank via EEC - "
			 "reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			goto out;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			goto out;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			goto out;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			goto out;
		}

		DEBUGOUT("ERROR: No valid NVM bank present\n");
		ret_val = -E1000_ERR_NVM;
		break;
	}
out:
	return ret_val;
}

/**
 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the word(s) to read.
 *  @words: Size of data to read in words
 *  @data: Pointer to the word(s) to read at offset.
 *
 *  Reads a word(s) from the NVM using the flash access registers.
 *  Words that have been modified in the shadow RAM are returned from
 *  there instead of from the flash.
 **/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u16 i, word;

	DEBUGFUNC("e1000_read_nvm_ich8lan");

	/* Reject empty reads and reads past the end of the NVM */
	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		/* Best effort: fall back to bank 0 rather than failing */
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	ret_val = E1000_SUCCESS;
	for (i = 0; i < words; i++) {
		/* Prefer pending (modified) shadow RAM values over flash */
		if ((dev_spec->shadow_ram) &&
		    (dev_spec->shadow_ram[offset+i].modified)) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		DEBUGOUT1("NVM read error: %d\n", ret_val);

	return ret_val;
}

/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase cycle
 *  can be started.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (hsfsts.hsf_status.fldesvalid == 0) {
		DEBUGOUT("Flash descriptor invalid.  "
			 "SW Sequencing must be used.");
		goto out;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

	/*
	 * Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (hsfsts.hsf_status.flcinprog == 0) {
		/*
		 * There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = E1000_SUCCESS;
	} else {
		s32 i;

		/*
		 * Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							       ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcinprog == 0) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(1);
		}
		if (ret_val == E1000_SUCCESS) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
						hsfsts.regval);
		} else {
			DEBUGOUT("Flash controller busy, cannot get access");
		}
	}

out:
	return ret_val;
}

/**
 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 *  @hw: pointer to the HW structure
 *  @timeout: maximum time to wait for completion
 *
 *  This function starts a flash cycle and waits for its completion.
 *  Returns success only if the cycle finished without a flash error.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;
	u32 i = 0;

	DEBUGFUNC("e1000_flash_cycle_ich8lan");

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;
	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone == 1)
			break;
		usec_delay(1);
	} while (i++ < timeout);

	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
		ret_val = E1000_SUCCESS;

	return ret_val;
}

/**
 *  e1000_read_flash_word_ich8lan - Read word from flash
 *  @hw: pointer to the HW structure
 *  @offset: offset to data location
 *  @data: pointer to the location for storing the data
 *
 *  Reads the flash word at offset into data.  Offset is converted
 *  to bytes before read.
 **/
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_read_flash_word_ich8lan");

	if (!data) {
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Must convert offset into bytes. */
	offset <<= 1;

	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);

out:
	return ret_val;
}

/**
 *  e1000_read_flash_byte_ich8lan - Read byte from flash
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte to read.
 *  @data: Pointer to a byte to store the value read.
 *
 *  Reads a single byte from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data)
{
	s32 ret_val = E1000_SUCCESS;
	u16 word = 0;

	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
	if (ret_val)
		goto out;

	/* Only the low byte is populated for a 1-byte read */
	*data = (u8)word;

out:
	return ret_val;
}

/**
 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte or word to read.
 *  @size: Size of data to read, 1=byte 2=word
 *  @data: Pointer to the word to store the value read.
 *
 *  Reads a byte or word from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	DEBUGFUNC("e1000_read_flash_data_ich8lan");

	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		goto out;

	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			    hw->nvm.flash_base_addr;

	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;

		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_READ_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (ret_val == E1000_SUCCESS) {
			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							       ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr == 1) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (hsfsts.hsf_status.flcdone == 0) {
				DEBUGOUT("Timeout error - flash cycle "
					 "did not complete.");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

out:
	return ret_val;
}

/**
 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the word(s) to write.
 *  @words: Size of data to write in words
 *  @data: Pointer to the word(s) to write at offset.
 *
 *  Writes a byte or word to the NVM using the flash access registers.
 *  The words are only staged in the shadow RAM here; they are committed
 *  to the flash by e1000_update_nvm_checksum_ich8lan().
 **/
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val = E1000_SUCCESS;
	u16 i;

	DEBUGFUNC("e1000_write_nvm_ich8lan");

	/* Reject empty writes and writes past the end of the NVM */
	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	/* Stage the new values; mark them pending for the next commit */
	for (i = 0; i < words; i++) {
		dev_spec->shadow_ram[offset+i].modified = TRUE;
		dev_spec->shadow_ram[offset+i].value = data[i];
	}

	nvm->ops.release(hw);

out:
	return ret_val;
}

/**
 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
 *  which writes the checksum to the shadow ram.  The changes in the shadow
 *  ram are then committed to the EEPROM by processing each bank at a time
 *  checking for the modified bit and writing only the pending changes.
 *  After a successful commit, the shadow ram is cleared and is ready for
 *  future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");

	ret_val = e1000_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/*
	 * We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}

	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		/*
		 * Determine whether to write the value stored
		 * in the other NVM bank or a modified value stored
		 * in the shadow RAM
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/*
		 * If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usec_delay(100);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		usec_delay(100);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset + 1,
							       (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/*
	 * Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		DEBUGOUT("Flash commit failed.\n");
		goto release;
	}

	/*
	 * Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/*
	 * And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b.  This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's.  We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/*
	 * Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		msec_delay(10);
	}

out:
	if (ret_val)
		DEBUGOUT1("NVM update error: %d\n", ret_val);

	return ret_val;
}

/**
 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
 *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
 *  calculated, in which case we need to calculate the checksum and set bit 6.
 **/
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");

	/*
	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
	 * needs to be fixed.  This bit is an indication that the NVM
	 * was prepared by OEM software and did not calculate the
	 * checksum...a likely scenario.
	 */
	ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
	if (ret_val)
		goto out;

	if ((data & 0x40) == 0) {
		/* Mark the checksum as valid and commit it to the NVM */
		data |= 0x40;
		ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
		if (ret_val)
			goto out;
		ret_val = hw->nvm.ops.update(hw);
		if (ret_val)
			goto out;
	}

	ret_val = e1000_validate_nvm_checksum_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte/word to write.
 *  @size: Size of data to write, 1=byte 2=word
 *  @data: The byte(s) to write to the NVM.
 *
 *  Writes one/two bytes to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	DEBUGFUNC("e1000_write_ich8_data");

	/* Reject sizes other than 1/2, data wider than size, bad offsets */
	if (size < 1 || size > 2 || data > size * 0xff ||
	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
		goto out;

	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			    hw->nvm.flash_base_addr;

	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;

		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);

		/*
		 * check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val = e1000_flash_cycle_ich8lan(hw,
					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
		if (ret_val == E1000_SUCCESS)
			break;

		/*
		 * If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr == 1)
			/* Repeat for some time before giving up. */
			continue;
		if (hsfsts.hsf_status.flcdone == 0) {
			DEBUGOUT("Timeout error - flash cycle "
				 "did not complete.");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

out:
	return ret_val;
}

/**
 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte to write.
 *  @data: The byte to write to the NVM.
 *
 *  Writes a single byte to the NVM using the flash access registers.
2950 **/ 2951 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 2952 u8 data) 2953 { 2954 u16 word = (u16)data; 2955 2956 DEBUGFUNC("e1000_write_flash_byte_ich8lan"); 2957 2958 return e1000_write_flash_data_ich8lan(hw, offset, 1, word); 2959 } 2960 2961 /** 2962 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM 2963 * @hw: pointer to the HW structure 2964 * @offset: The offset of the byte to write. 2965 * @byte: The byte to write to the NVM. 2966 * 2967 * Writes a single byte to the NVM using the flash access registers. 2968 * Goes through a retry algorithm before giving up. 2969 **/ 2970 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 2971 u32 offset, u8 byte) 2972 { 2973 s32 ret_val; 2974 u16 program_retries; 2975 2976 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); 2977 2978 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 2979 if (ret_val == E1000_SUCCESS) 2980 goto out; 2981 2982 for (program_retries = 0; program_retries < 100; program_retries++) { 2983 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); 2984 usec_delay(100); 2985 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 2986 if (ret_val == E1000_SUCCESS) 2987 break; 2988 } 2989 if (program_retries == 100) { 2990 ret_val = -E1000_ERR_NVM; 2991 goto out; 2992 } 2993 2994 out: 2995 return ret_val; 2996 } 2997 2998 /** 2999 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM 3000 * @hw: pointer to the HW structure 3001 * @bank: 0 for first bank, 1 for second bank, etc. 3002 * 3003 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. 3004 * bank N is 4096 * N + flash_reg_addr. 
3005 **/ 3006 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) 3007 { 3008 struct e1000_nvm_info *nvm = &hw->nvm; 3009 union ich8_hws_flash_status hsfsts; 3010 union ich8_hws_flash_ctrl hsflctl; 3011 u32 flash_linear_addr; 3012 /* bank size is in 16bit words - adjust to bytes */ 3013 u32 flash_bank_size = nvm->flash_bank_size * 2; 3014 s32 ret_val = E1000_SUCCESS; 3015 s32 count = 0; 3016 s32 j, iteration, sector_size; 3017 3018 DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); 3019 3020 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 3021 3022 /* 3023 * Determine HW Sector size: Read BERASE bits of hw flash status 3024 * register 3025 * 00: The Hw sector is 256 bytes, hence we need to erase 16 3026 * consecutive sectors. The start index for the nth Hw sector 3027 * can be calculated as = bank * 4096 + n * 256 3028 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 3029 * The start index for the nth Hw sector can be calculated 3030 * as = bank * 4096 3031 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 3032 * (ich9 only, otherwise error condition) 3033 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 3034 */ 3035 switch (hsfsts.hsf_status.berasesz) { 3036 case 0: 3037 /* Hw sector size 256 */ 3038 sector_size = ICH_FLASH_SEG_SIZE_256; 3039 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; 3040 break; 3041 case 1: 3042 sector_size = ICH_FLASH_SEG_SIZE_4K; 3043 iteration = 1; 3044 break; 3045 case 2: 3046 sector_size = ICH_FLASH_SEG_SIZE_8K; 3047 iteration = 1; 3048 break; 3049 case 3: 3050 sector_size = ICH_FLASH_SEG_SIZE_64K; 3051 iteration = 1; 3052 break; 3053 default: 3054 ret_val = -E1000_ERR_NVM; 3055 goto out; 3056 } 3057 3058 /* Start with the base address, then add the sector offset. */ 3059 flash_linear_addr = hw->nvm.flash_base_addr; 3060 flash_linear_addr += (bank) ? 
flash_bank_size : 0; 3061 3062 for (j = 0; j < iteration ; j++) { 3063 do { 3064 /* Steps */ 3065 ret_val = e1000_flash_cycle_init_ich8lan(hw); 3066 if (ret_val) 3067 goto out; 3068 3069 /* 3070 * Write a value 11 (block Erase) in Flash 3071 * Cycle field in hw flash control 3072 */ 3073 hsflctl.regval = E1000_READ_FLASH_REG16(hw, 3074 ICH_FLASH_HSFCTL); 3075 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 3076 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, 3077 hsflctl.regval); 3078 3079 /* 3080 * Write the last 24 bits of an index within the 3081 * block into Flash Linear address field in Flash 3082 * Address. 3083 */ 3084 flash_linear_addr += (j * sector_size); 3085 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, 3086 flash_linear_addr); 3087 3088 ret_val = e1000_flash_cycle_ich8lan(hw, 3089 ICH_FLASH_ERASE_COMMAND_TIMEOUT); 3090 if (ret_val == E1000_SUCCESS) 3091 break; 3092 3093 /* 3094 * Check if FCERR is set to 1. If 1, 3095 * clear it and try the whole sequence 3096 * a few more times else Done 3097 */ 3098 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 3099 ICH_FLASH_HSFSTS); 3100 if (hsfsts.hsf_status.flcerr == 1) 3101 /* repeat for some time before giving up */ 3102 continue; 3103 else if (hsfsts.hsf_status.flcdone == 0) 3104 goto out; 3105 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); 3106 } 3107 3108 out: 3109 return ret_val; 3110 } 3111 3112 /** 3113 * e1000_valid_led_default_ich8lan - Set the default LED settings 3114 * @hw: pointer to the HW structure 3115 * @data: Pointer to the LED settings 3116 * 3117 * Reads the LED default settings from the NVM to data. If the NVM LED 3118 * settings is all 0's or F's, set the LED default to a valid LED default 3119 * setting. 
3120 **/ 3121 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) 3122 { 3123 s32 ret_val; 3124 3125 DEBUGFUNC("e1000_valid_led_default_ich8lan"); 3126 3127 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); 3128 if (ret_val) { 3129 DEBUGOUT("NVM Read Error\n"); 3130 goto out; 3131 } 3132 3133 if (*data == ID_LED_RESERVED_0000 || 3134 *data == ID_LED_RESERVED_FFFF) 3135 *data = ID_LED_DEFAULT_ICH8LAN; 3136 3137 out: 3138 return ret_val; 3139 } 3140 3141 /** 3142 * e1000_id_led_init_pchlan - store LED configurations 3143 * @hw: pointer to the HW structure 3144 * 3145 * PCH does not control LEDs via the LEDCTL register, rather it uses 3146 * the PHY LED configuration register. 3147 * 3148 * PCH also does not have an "always on" or "always off" mode which 3149 * complicates the ID feature. Instead of using the "on" mode to indicate 3150 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()), 3151 * use "link_up" mode. The LEDs will still ID on request if there is no 3152 * link based on logic in e1000_led_[on|off]_pchlan(). 
 **/
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	/* "on" = link-up mode; "off" = link-up mode with the invert bit set */
	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
	u16 data, i, temp, shift;

	DEBUGFUNC("e1000_id_led_init_pchlan");

	/* Get default ID LED modes */
	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		goto out;

	/*
	 * Cache the current LEDCTL value as the baseline for both the
	 * "ID on" (mode2) and "ID off" (mode1) configurations.
	 */
	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		/* Each LED gets a 4-bit mode nibble in the NVM word... */
		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
		/* ...and a 5-bit configuration field in the PHY LED register */
		shift = (i * 5);
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			/* LED participates in ID: "on" phase (mode1) */
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_on << shift);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			/* LED participates in ID: "off" phase (mode2) */
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_on << shift);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}

/**
 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
 *  @hw: pointer to the HW structure
 *
 *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
 *  register, so
 *  the bus width is hard coded.
 **/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;

	DEBUGFUNC("e1000_get_bus_info_ich8lan");

	ret_val = e1000_get_bus_info_pcie_generic(hw);

	/*
	 * ICH devices are "PCI Express"-ish.  They have
	 * a configuration space, but do not contain
	 * PCI Express Capability registers, so bus width
	 * must be hardcoded.
	 */
	if (bus->width == e1000_bus_width_unknown)
		bus->width = e1000_bus_width_pcie_x1;

	return ret_val;
}

/**
 *  e1000_reset_hw_ich8lan - Reset the hardware
 *  @hw: pointer to the HW structure
 *
 *  Does a full reset of the hardware which includes a reset of the PHY and
 *  MAC.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 reg;
	u32 ctrl, kab;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/*
	 * Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting so it survives the reset */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
		if (ret_val)
			return ret_val;

		if (reg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = TRUE;
		else
			dev_spec->nvm_k1_enabled = FALSE;
	}

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/*
		 * Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
	}
	/* The reset proceeds even if the SW flag could not be acquired */
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	DEBUGOUT("Issuing a global reset to ich8lan\n");
	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
	msec_delay(20);

	/* Only release the SW flag if it was successfully acquired above */
	if (!ret_val)
		e1000_release_swflag_ich8lan(hw);

	if (ctrl & E1000_CTRL_PHY_RST) {
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			goto out;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			goto out;
	}

	/*
	 * For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);

	/* Mask and clear any interrupts raised by the reset */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	kab = E1000_READ_REG(hw, E1000_KABGTXD);
	kab |= E1000_KABGTXD_BGSQLBIAS;
	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);

out:
	return ret_val;
}

/**
 *  e1000_init_hw_ich8lan - Initialize the hardware
 *  @hw: pointer to the HW structure
 *
 *  Prepares the hardware for transmit and receive by doing the following:
 *   - initialize hardware bits
 *   - initialize LED identification
 *   - setup receive address registers
 *   - setup flow control
 *   - setup transmit descriptors
 *   - clear statistics
 **/
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("e1000_init_hw_ich8lan");

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val)
		DEBUGOUT("Error initializing identification LED\n");
	/* This is not fatal and we should not stop init due to this */

	/* Setup the receive address. */
	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/*
	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Reading the BM_WUC register will clear the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		/* value read is discarded; the read itself clears the bit */
		hw->phy.ops.read_reg(hw, BM_WUC, &i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);

	/*
	 * ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
	e1000_set_pcie_no_snoop_generic(hw, snoop);

	/* Disable relaxed ordering */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return ret_val;
}
/**
 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
 *  @hw: pointer to the HW structure
 *
 *  Sets/Clears required hardware bits necessary for correctly setting up the
 *  hardware for transmit and receive.
3453 **/ 3454 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) 3455 { 3456 u32 reg; 3457 3458 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); 3459 3460 /* Extended Device Control */ 3461 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 3462 reg |= (1 << 22); 3463 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 3464 if (hw->mac.type >= e1000_pchlan) 3465 reg |= E1000_CTRL_EXT_PHYPDEN; 3466 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 3467 3468 /* Transmit Descriptor Control 0 */ 3469 reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); 3470 reg |= (1 << 22); 3471 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); 3472 3473 /* Transmit Descriptor Control 1 */ 3474 reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); 3475 reg |= (1 << 22); 3476 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); 3477 3478 /* Transmit Arbitration Control 0 */ 3479 reg = E1000_READ_REG(hw, E1000_TARC(0)); 3480 if (hw->mac.type == e1000_ich8lan) 3481 reg |= (1 << 28) | (1 << 29); 3482 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 3483 E1000_WRITE_REG(hw, E1000_TARC(0), reg); 3484 3485 /* Transmit Arbitration Control 1 */ 3486 reg = E1000_READ_REG(hw, E1000_TARC(1)); 3487 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) 3488 reg &= ~(1 << 28); 3489 else 3490 reg |= (1 << 28); 3491 reg |= (1 << 24) | (1 << 26) | (1 << 30); 3492 E1000_WRITE_REG(hw, E1000_TARC(1), reg); 3493 3494 /* Device Status */ 3495 if (hw->mac.type == e1000_ich8lan) { 3496 reg = E1000_READ_REG(hw, E1000_STATUS); 3497 reg &= ~(1 << 31); 3498 E1000_WRITE_REG(hw, E1000_STATUS, reg); 3499 } 3500 3501 /* 3502 * work-around descriptor data corruption issue during nfs v2 udp 3503 * traffic, just disable the nfs filtering capability 3504 */ 3505 reg = E1000_READ_REG(hw, E1000_RFCTL); 3506 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 3507 E1000_WRITE_REG(hw, E1000_RFCTL, reg); 3508 3509 return; 3510 } 3511 3512 /** 3513 * e1000_setup_link_ich8lan - Setup flow control and link settings 3514 * @hw: pointer to the HW structure 
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_setup_link_ich8lan");

	/* If PHY resets are blocked (e.g. by manageability), skip setup */
	if (hw->phy.ops.check_reset_block(hw))
		goto out;

	/*
	 * ICH parts do not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	if (hw->fc.requested_mode == e1000_fc_default)
		hw->fc.requested_mode = e1000_fc_full;

	/*
	 * Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
		  hw->fc.current_mode);

	/* Continue to configure the copper link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Program the flow control timer and, on PCH PHYs, refresh timer */
	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_82577)) {
		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);

		ret_val = hw->phy.ops.write_reg(hw,
					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
					     hw->fc.pause_time);
		if (ret_val)
			goto out;
	}

	ret_val = e1000_set_fc_watermarks_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Configures the kumeran interface to the PHY to wait the appropriate time
 *  when polling the PHY, then call the generic setup_copper_link to finish
 *  configuring the copper link.
 **/
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	/* Set link up, let speed/duplex come from the PHY (no forcing) */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/*
	 * Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		goto out;
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		goto out;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		goto out;

	/* PHY-family-specific configuration before the generic link setup */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			goto out;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			goto out;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			goto out;
		break;
	case e1000_phy_ife:
		/* IFE: translate hw->phy.mdix into the PHY's MDI-X control */
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			goto out;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			/* force MDI */
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			/* force MDI-X */
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			/* auto-crossover */
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			goto out;
		break;
	default:
		break;
	}
	ret_val = e1000_setup_copper_link_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 *  @hw: pointer to the HW structure
 *  @speed: pointer to store current link speed
 *  @duplex: pointer to store the current link duplex
 *
 *  Calls the generic get_speed_and_duplex to retrieve the current link
 *  information and then calls the Kumeran lock loss workaround for links at
 *  gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_ich8lan");

	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
	if (ret_val)
		goto out;

	/* Kumeran PCS lock loss workaround applies only to ICH8/IGP3 at 1Gb */
	if ((hw->mac.type == e1000_ich8lan) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (*speed == SPEED_1000)) {
		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
	}

out:
	return ret_val;
}

/**
 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 *  @hw: pointer to the HW structure
 *
 *  Work-around for 82566 Kumeran PCS lock loss:
 *  On link status change (i.e. PCI reset, speed change) and link is up and
 *  speed is gigabit-
 *    0) if workaround is optionally disabled do nothing
 *    1) wait 1ms for Kumeran link to come up
 *    2) check Kumeran Diagnostic register PCS lock loss bit
 *    3) if not set the link is locked (all is good), otherwise...
 *    4) reset the PHY
 *    5) repeat up to 10 times
 *  Note: this is only called for IGP3 copper when speed is 1gb.
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	/* Workaround can be switched off via the dev_spec flag */
	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		goto out;

	/*
	 * Make sure link is up before proceeding.  If not just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link) {
		ret_val = E1000_SUCCESS;
		goto out;
	}

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			goto out;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			goto out;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
			ret_val = E1000_SUCCESS;
			goto out;
		}

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* All retries exhausted: Disable GigE link negotiation */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	ret_val = -E1000_ERR_PHY;

out:
	return ret_val;
}

/**
 *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 *  @hw: pointer to the HW structure
 *  @state: boolean value used to set the current Kumeran workaround state
 *
 *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
 *  /disabled - FALSE).
 **/
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						 bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");

	/* The PCS lock loss workaround only exists on ICH8 parts */
	if (hw->mac.type != e1000_ich8lan) {
		DEBUGOUT("Workaround applies to ICH8 only.\n");
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;

	return;
}

/**
 *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for 82566 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write VR power-down enable
 *    3) read it back
 *  Continue if successful, else issue LCD reset and repeat
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	if (hw->phy.type != e1000_phy_igp_3)
		goto out;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);

out:
	return;
}

/**
 *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 *  @hw: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 *  LPLU, Gig disable, MDIC PHY reset):
 *    1) Set Kumeran Near-end loopback
 *    2) Clear Kumeran Near-end loopback
 *  Should only be called for ICH8[m] devices with IGP_3 Phy.
 **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 reg_data;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type != e1000_phy_igp_3))
		goto out;

	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		goto out;
	/* Pulse Kumeran near-end loopback: set it... */
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		goto out;
	/* ...then clear it (best-effort; final write status is not checked) */
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
out:
	return;
}

/**
 *  e1000_disable_gig_wol_ich8lan - disable gig during WoL
 *  @hw: pointer to the HW structure
 *
 *  During S0 to Sx transition, it is possible the link remains at gig
 *  instead of negotiating to a lower speed.  Before going to Sx, set
 *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
 *  to a lower speed.
 *
 *  Should only be called for applicable parts.
 **/
void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_disable_gig_wol_ich8lan");

	/* Enable D0a LPLU and disable gigabit before entering Sx */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, FALSE);
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}

/**
 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_ich8lan");

	/* IFE PHYs control LEDs via a PHY register, not the MAC's LEDCTL */
	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
					     0);

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
}

/**
 *  e1000_led_on_ich8lan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_on_ich8lan");

	/* IFE PHYs control LEDs via a PHY register, not the MAC's LEDCTL */
	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
	return E1000_SUCCESS;
}

/**
 *  e1000_led_off_ich8lan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_off_ich8lan");

	/* IFE PHYs control LEDs via a PHY register, not the MAC's LEDCTL */
	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
	return E1000_SUCCESS;
}

/**
 *  e1000_setup_led_pchlan - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_setup_led_pchlan");

	/* PCH drives LEDs through the PHY's HV_LED_CONFIG register */
	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_mode1);
}

/**
 *  e1000_cleanup_led_pchlan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_default);
}

/**
 *  e1000_led_on_pchlan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	DEBUGFUNC("e1000_led_on_pchlan");

	/*
	 * If no link, then turn LED on by setting the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode2.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		/* 3 LEDs, 5 configuration bits each in HV_LED_CONFIG */
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_led_off_pchlan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	DEBUGFUNC("e1000_led_off_pchlan");

	/*
	 * If no link, then turn LED off by clearing the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode1.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		/* 3 LEDs, 5 configuration bits each in HV_LED_CONFIG */
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and continues.  If we were
 *  to return with error, EEPROM-less silicon would not be able to be reset
 *  or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/*
			 * When auto config read does not complete, do not
			 * return with an error.  This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			/* EEPROM-less part: run the IGP3 init script in s/w */
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		/* ICH10+: a missing/invalid NVM bank means no EEPROM */
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 *  e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during a
 *  driver unload, or wake on lan is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/*
	 * If the management interface is not enabled, then power down.
	 * Skip the power-down when manageability or a reset block needs
	 * the PHY to stay up.
	 */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	/* Statistics registers are clear-on-read; results are discarded */
	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers (also clear-on-read) */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_82577)) {
		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
	}
}