/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/*
 * 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 */

#include "e1000_api.h"

static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_phy_params_pchlan(struct e1000_hw *hw);
static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                   u16 words, u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
                                            u16 *data);
static s32  e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
                                           u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u8 *data);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u8 size, u16 *data);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                 u32 offset, u8 byte);
static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                           u32 offset, u8 data);
static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                           u8 size, u16 data);
static s32  e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32  e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32  e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
    struct ich8_hsfsts {
        u16 flcdone    :1; /* bit 0 Flash Cycle Done */
        u16 flcerr     :1; /* bit 1 Flash Cycle Error */
        u16 dael       :1; /* bit 2 Direct Access error Log */
        u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
        u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
        u16 reserved1  :2; /* bit 7:6 Reserved */
        u16 reserved2  :6; /* bit 13:8 Reserved */
        u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
        u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
    } hsf_status;
    u16 regval;
};
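/*
 * Usage note (illustrative, not part of the original sources): the unions in
 * this file overlay a bit-field view on the raw flash register value, so a
 * register read can be decoded field by field without manual masking, e.g.:
 *
 *     union ich8_hws_flash_status hsfsts;
 *     hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *     if (hsfsts.hsf_status.flcinprog)
 *         ; // a flash cycle is still in progress
 *
 * e1000_flash_cycle_init_ich8lan() below uses exactly this pattern.
 */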
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
    struct ich8_hsflctl {
        u16 flcgo     :1; /* 0 Flash Cycle Go */
        u16 flcycle   :2; /* 2:1 Flash Cycle */
        u16 reserved  :5; /* 7:3 Reserved */
        u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
        u16 flockdn   :6; /* 15:10 Reserved */
    } hsf_ctrl;
    u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
    struct ich8_flracc {
        u32 grra  :8; /* 0:7 GbE region Read Access */
        u32 grwa  :8; /* 8:15 GbE region Write Access */
        u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
        u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
    } hsf_flregacc;
    u16 regval;
};

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
    struct e1000_phy_info *phy = &hw->phy;
    u32 ctrl;
    s32 ret_val = E1000_SUCCESS;

    DEBUGFUNC("e1000_init_phy_params_pchlan");

    phy->addr = 1;
    phy->reset_delay_us = 100;

    phy->ops.acquire = e1000_acquire_swflag_ich8lan;
    phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
    phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
    phy->ops.read_reg = e1000_read_phy_reg_hv;
    phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
    phy->ops.release = e1000_release_swflag_ich8lan;
    phy->ops.reset = e1000_phy_hw_reset_ich8lan;
    phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
    phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
    phy->ops.write_reg = e1000_write_phy_reg_hv;
    phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
    phy->ops.power_up = e1000_power_up_phy_copper;
    phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
    phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

    if ((hw->mac.type == e1000_pchlan) &&
        (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))) {
        /*
         * The MAC-PHY interconnect may still be in SMBus mode
         * after Sx->S0.  Toggle the LANPHYPC Value bit to force
         * the interconnect to PCIe mode, but only if there is no
         * firmware present; otherwise firmware will have done it.
         */
        ctrl = E1000_READ_REG(hw, E1000_CTRL);
        ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
        ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
        E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
        usec_delay(10);
        ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
        E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
        msec_delay(50);
    }

    /*
     * Reset the PHY before any access to it.  Doing so ensures that
     * the PHY is in a known good state before we read/write PHY registers.
     * The generic reset is sufficient here, because we haven't determined
     * the PHY type yet.
     */
    ret_val = e1000_phy_hw_reset_generic(hw);
    if (ret_val)
        goto out;

    phy->id = e1000_phy_unknown;
    ret_val = e1000_get_phy_id(hw);
    if (ret_val)
        goto out;
    if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
        /*
         * In case the PHY needs to be in mdio slow mode (e.g. 82577),
         * set slow mode and try to get the PHY id again.
         */
        ret_val = e1000_set_mdio_slow_mode_hv(hw);
        if (ret_val)
            goto out;
        ret_val = e1000_get_phy_id(hw);
        if (ret_val)
            goto out;
    }
    phy->type = e1000_get_phy_type_from_id(phy->id);

    switch (phy->type) {
    case e1000_phy_82577:
        phy->ops.check_polarity = e1000_check_polarity_82577;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577;
        phy->ops.get_cable_length = e1000_get_cable_length_82577;
        phy->ops.get_info = e1000_get_phy_info_82577;
        phy->ops.commit = e1000_phy_sw_reset_generic;
        break;
    case e1000_phy_82578:
        phy->ops.check_polarity = e1000_check_polarity_m88;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
        phy->ops.get_cable_length = e1000_get_cable_length_m88;
        phy->ops.get_info = e1000_get_phy_info_m88;
        break;
    default:
        ret_val = -E1000_ERR_PHY;
        break;
    }

out:
    return ret_val;
}
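/*
 * Flow note (illustrative, not part of the original sources): when the first
 * PHY id read above returns 0 or all ones, the code drops to slow MDIO mode
 * and retries, roughly:
 *
 *     ret_val = e1000_set_mdio_slow_mode_hv(hw);
 *     if (!ret_val)
 *         ret_val = e1000_get_phy_id(hw);
 *
 * (The actual code above checks each call separately; this is a condensed
 * sketch of the same retry.)
 */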
/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_phy_info *phy = &hw->phy;
    s32 ret_val = E1000_SUCCESS;
    u16 i = 0;

    DEBUGFUNC("e1000_init_phy_params_ich8lan");

    phy->addr = 1;
    phy->reset_delay_us = 100;

    phy->ops.acquire = e1000_acquire_swflag_ich8lan;
    phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
    phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
    phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
    phy->ops.read_reg = e1000_read_phy_reg_igp;
    phy->ops.release = e1000_release_swflag_ich8lan;
    phy->ops.reset = e1000_phy_hw_reset_ich8lan;
    phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
    phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
    phy->ops.write_reg = e1000_write_phy_reg_igp;
    phy->ops.power_up = e1000_power_up_phy_copper;
    phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

    /*
     * We may need to do this twice - once for IGP and if that fails,
     * we'll set BM func pointers and try again
     */
    ret_val = e1000_determine_phy_address(hw);
    if (ret_val) {
        phy->ops.write_reg = e1000_write_phy_reg_bm;
        phy->ops.read_reg = e1000_read_phy_reg_bm;
        ret_val = e1000_determine_phy_address(hw);
        if (ret_val) {
            DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
            goto out;
        }
    }

    phy->id = 0;
    while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
           (i++ < 100)) {
        msec_delay(1);
        ret_val = e1000_get_phy_id(hw);
        if (ret_val)
            goto out;
    }

    /* Verify phy id */
    switch (phy->id) {
    case IGP03E1000_E_PHY_ID:
        phy->type = e1000_phy_igp_3;
        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
        phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
        phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
        phy->ops.get_info = e1000_get_phy_info_igp;
        phy->ops.check_polarity = e1000_check_polarity_igp;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
        break;
    case IFE_E_PHY_ID:
    case IFE_PLUS_E_PHY_ID:
    case IFE_C_E_PHY_ID:
        phy->type = e1000_phy_ife;
        phy->autoneg_mask = E1000_ALL_NOT_GIG;
        phy->ops.get_info = e1000_get_phy_info_ife;
        phy->ops.check_polarity = e1000_check_polarity_ife;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
        break;
    case BME1000_E_PHY_ID:
        phy->type = e1000_phy_bm;
        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
        phy->ops.read_reg = e1000_read_phy_reg_bm;
        phy->ops.write_reg = e1000_write_phy_reg_bm;
        phy->ops.commit = e1000_phy_sw_reset_generic;
        phy->ops.get_info = e1000_get_phy_info_m88;
        phy->ops.check_polarity = e1000_check_polarity_m88;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
        break;
    default:
        ret_val = -E1000_ERR_PHY;
        goto out;
    }

out:
    return ret_val;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_nvm_info *nvm = &hw->nvm;
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    u32 gfpreg, sector_base_addr, sector_end_addr;
    s32 ret_val = E1000_SUCCESS;
    u16 i;

    DEBUGFUNC("e1000_init_nvm_params_ich8lan");

    /* Can't read flash registers if the register set isn't mapped. */
    if (!hw->flash_address) {
        DEBUGOUT("ERROR: Flash registers not mapped\n");
        ret_val = -E1000_ERR_CONFIG;
        goto out;
    }

    nvm->type = e1000_nvm_flash_sw;

    gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

    /*
     * sector_X_addr is a "sector"-aligned address (4096 bytes)
     * Add 1 to sector_end_addr since this sector is included in
     * the overall size.
     */
    sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
    sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

    /* flash_base_addr is byte-aligned */
    nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

    /*
     * find total size of the NVM, then cut in half since the total
     * size represents two separate NVM banks.
     */
    nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
                           << FLASH_SECTOR_ADDR_SHIFT;
    nvm->flash_bank_size /= 2;
    /* Adjust to word count */
    nvm->flash_bank_size /= sizeof(u16);

    nvm->word_size = E1000_SHADOW_RAM_WORDS;

    /* Clear shadow ram */
    for (i = 0; i < nvm->word_size; i++) {
        dev_spec->shadow_ram[i].modified = FALSE;
        dev_spec->shadow_ram[i].value = 0xFFFF;
    }

    E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
    E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

    /* Function Pointers */
    nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
    nvm->ops.release = e1000_release_nvm_ich8lan;
    nvm->ops.read = e1000_read_nvm_ich8lan;
    nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
    nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
    nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
    nvm->ops.write = e1000_write_nvm_ich8lan;

out:
    return ret_val;
}
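/*
 * Sizing note (illustrative, not part of the original sources): GFPREG holds
 * 4 KiB sector numbers for the GbE flash region, so the bank size computed
 * above works out roughly as
 *
 *     region_bytes = (sector_end_addr - sector_base_addr) * 4096
 *     bank_words   = region_bytes / 2 / sizeof(u16)
 *
 * i.e. half the region (one of the two NVM banks), expressed in 16-bit words.
 */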
/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_mac_info *mac = &hw->mac;
    u16 pci_cfg;

    DEBUGFUNC("e1000_init_mac_params_ich8lan");

    /* Set media type function pointer */
    hw->phy.media_type = e1000_media_type_copper;

    /* Set mta register count */
    mac->mta_reg_count = 32;
    /* Set rar entry count */
    mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
    if (mac->type == e1000_ich8lan)
        mac->rar_entry_count--;
    /* Set if part includes ASF firmware */
    mac->asf_firmware_present = TRUE;
    /* FWSM register */
    mac->has_fwsm = TRUE;
    /* ARC subsystem not supported */
    mac->arc_subsystem_valid = FALSE;
    /* Adaptive IFS supported */
    mac->adaptive_ifs = TRUE;

    /* Function pointers */

    /* bus type/speed/width */
    mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
    /* function id */
    mac->ops.set_lan_id = e1000_set_lan_id_single_port;
    /* reset */
    mac->ops.reset_hw = e1000_reset_hw_ich8lan;
    /* hw initialization */
    mac->ops.init_hw = e1000_init_hw_ich8lan;
    /* link setup */
    mac->ops.setup_link = e1000_setup_link_ich8lan;
    /* physical interface setup */
    mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
    /* check for link */
    mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
    /* check management mode */
    mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
    /* link info */
    mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
    /* multicast address update */
    mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
    /* clear hardware counters */
    mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

    /* LED operations */
    switch (mac->type) {
    case e1000_ich8lan:
    case e1000_ich9lan:
    case e1000_ich10lan:
        /* ID LED init */
        mac->ops.id_led_init = e1000_id_led_init_generic;
        /* blink LED */
        mac->ops.blink_led = e1000_blink_led_generic;
        /* setup LED */
        mac->ops.setup_led = e1000_setup_led_generic;
        /* cleanup LED */
        mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
        /* turn on/off LED */
        mac->ops.led_on = e1000_led_on_ich8lan;
        mac->ops.led_off = e1000_led_off_ich8lan;
        break;
    case e1000_pchlan:
        /* save PCH revision_id */
        e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
        hw->revision_id = (u8)(pci_cfg &= 0x000F);
        /* ID LED init */
        mac->ops.id_led_init = e1000_id_led_init_pchlan;
        /* setup LED */
        mac->ops.setup_led = e1000_setup_led_pchlan;
        /* cleanup LED */
        mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
        /* turn on/off LED */
        mac->ops.led_on = e1000_led_on_pchlan;
        mac->ops.led_off = e1000_led_off_pchlan;
        break;
    default:
        break;
    }

    /* Enable PCS Lock-loss workaround for ICH8 */
    if (mac->type == e1000_ich8lan)
        e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

    return E1000_SUCCESS;
}

/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
    struct e1000_mac_info *mac = &hw->mac;
    s32 ret_val;
    bool link;

    DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

    /*
     * We only want to go out to the PHY registers to see if Auto-Neg
     * has completed and/or if our link status has changed.  The
     * get_link_status flag is set upon receiving a Link Status
     * Change or Rx Sequence Error interrupt.
     */
    if (!mac->get_link_status) {
        ret_val = E1000_SUCCESS;
        goto out;
    }

    /*
     * First we want to see if the MII Status Register reports
     * link.  If so, then we want to get the current speed/duplex
     * of the PHY.
     */
    ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
    if (ret_val)
        goto out;

    if (hw->mac.type == e1000_pchlan) {
        ret_val = e1000_k1_gig_workaround_hv(hw, link);
        if (ret_val)
            goto out;
    }

    if (!link)
        goto out; /* No link detected */

    mac->get_link_status = FALSE;

    if (hw->phy.type == e1000_phy_82578) {
        ret_val = e1000_link_stall_workaround_hv(hw);
        if (ret_val)
            goto out;
    }

    /*
     * Check if there was DownShift, must be checked
     * immediately after link-up
     */
    e1000_check_downshift_generic(hw);

    /*
     * If we are forcing speed/duplex, then we simply return since
     * we have already determined whether we have link or not.
     */
    if (!mac->autoneg) {
        ret_val = -E1000_ERR_CONFIG;
        goto out;
    }

    /*
     * Auto-Neg is enabled.  Auto Speed Detection takes care
     * of MAC speed/duplex configuration.  So we only need to
     * configure Collision Distance in the MAC.
     */
    e1000_config_collision_dist_generic(hw);

    /*
     * Configure Flow Control now that Auto-Neg has completed.
     * First, we need to restore the desired flow control
     * settings because we may have had to re-autoneg with a
     * different link partner.
     */
    ret_val = e1000_config_fc_after_link_up_generic(hw);
    if (ret_val)
        DEBUGOUT("Error configuring flow control\n");

out:
    return ret_val;
}

/**
 *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific function pointers for PHY, MAC, and NVM.
 **/
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
{
    DEBUGFUNC("e1000_init_function_pointers_ich8lan");

    hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
    hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
    switch (hw->mac.type) {
    case e1000_ich8lan:
    case e1000_ich9lan:
    case e1000_ich10lan:
        hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
        break;
    case e1000_pchlan:
        hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
        break;
    default:
        break;
    }
}

/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
    DEBUGFUNC("e1000_acquire_nvm_ich8lan");

    E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);

    return E1000_SUCCESS;
}

/**
 *  e1000_release_nvm_ich8lan - Release NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
    DEBUGFUNC("e1000_release_nvm_ich8lan");

    E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);

    return;
}

/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
    u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
    s32 ret_val = E1000_SUCCESS;

    DEBUGFUNC("e1000_acquire_swflag_ich8lan");

    E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);

    while (timeout) {
        extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
        if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
            break;

        msec_delay_irq(1);
        timeout--;
    }

    if (!timeout) {
        DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
        ret_val = -E1000_ERR_CONFIG;
        goto out;
    }

    timeout = SW_FLAG_TIMEOUT;

    extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
    E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

    while (timeout) {
        extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
        if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
            break;

        msec_delay_irq(1);
        timeout--;
    }

    if (!timeout) {
        DEBUGOUT("Failed to acquire the semaphore.\n");
        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
        E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
        ret_val = -E1000_ERR_CONFIG;
        goto out;
    }

out:
    if (ret_val)
        E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

    return ret_val;
}

/**
 *  e1000_release_swflag_ich8lan - Release software control flag
 *  @hw: pointer to the HW structure
 *
 *  Releases the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
    u32 extcnf_ctrl;

    DEBUGFUNC("e1000_release_swflag_ich8lan");

    extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
    extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
    E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

    E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

    return;
}

/**
 *  e1000_check_mng_mode_ich8lan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has manageability enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
    u32 fwsm;

    DEBUGFUNC("e1000_check_mng_mode_ich8lan");

    fwsm = E1000_READ_REG(hw, E1000_FWSM);

    return (fwsm & E1000_FWSM_MODE_MASK) ==
           (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
}

/**
 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 *  @hw: pointer to the HW structure
 *
 *  Checks if firmware is blocking the reset of the PHY.
 *  This is a function pointer entry point only called by
 *  reset routines.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
    u32 fwsm;

    DEBUGFUNC("e1000_check_reset_block_ich8lan");

    if (hw->phy.reset_disable)
        return E1000_BLK_PHY_RESET;

    fwsm = E1000_READ_REG(hw, E1000_FWSM);

    return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
                                            : E1000_BLK_PHY_RESET;
}

/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw: pointer to the HW structure
 *
 *  SW should configure the LCD from the NVM extended configuration region
 *  as a workaround for certain parts.
 **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
    struct e1000_phy_info *phy = &hw->phy;
    u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
    s32 ret_val = E1000_SUCCESS;
    u16 word_addr, reg_data, reg_addr, phy_page = 0;

    if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
        !(hw->mac.type == e1000_pchlan))
        return ret_val;

    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        return ret_val;

    /*
     * Initialize the PHY from the NVM on ICH platforms.  This
     * is needed due to an issue where the NVM configuration is
     * not properly autoloaded after power transitions.
     * Therefore, after each PHY reset, we will load the
     * configuration data out of the NVM manually.
     */
    if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
        (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
        (hw->mac.type == e1000_pchlan))
        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
    else
        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;

    data = E1000_READ_REG(hw, E1000_FEXTNVM);
    if (!(data & sw_cfg_mask))
        goto out;

    /* Wait for basic configuration to complete before proceeding */
    e1000_lan_init_done_ich8lan(hw);

    /*
     * Make sure HW does not configure LCD from PHY
     * extended configuration before SW configuration
     */
    data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
    if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
        goto out;

    cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
    cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
    cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
    if (!cnf_size)
        goto out;

    cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
    cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

    if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
        (hw->mac.type == e1000_pchlan)) {
        /*
         * HW configures the SMBus address and LEDs when the
         * OEM and LCD Write Enable bits are set in the NVM.
         * When both NVM bits are cleared, SW will configure
         * them instead.
         */
        data = E1000_READ_REG(hw, E1000_STRAP);
        data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
        reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
        reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
        ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, reg_data);
        if (ret_val)
            goto out;

        data = E1000_READ_REG(hw, E1000_LEDCTL);
        ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
                                                (u16)data);
        if (ret_val)
            goto out;
    }

    /* Configure LCD from extended configuration region. */

    /* cnf_base_addr is in DWORD */
    word_addr = (u16)(cnf_base_addr << 1);

    for (i = 0; i < cnf_size; i++) {
        ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
                                   &reg_data);
        if (ret_val)
            goto out;

        ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1), 1,
                                   &reg_addr);
        if (ret_val)
            goto out;

        /* Save off the PHY page for future writes. */
        if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
            phy_page = reg_data;
            continue;
        }

        reg_addr &= PHY_REG_MASK;
        reg_addr |= phy_page;

        ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, reg_data);
        if (ret_val)
            goto out;
    }

out:
    hw->phy.ops.release(hw);
    return ret_val;
}

/**
 *  e1000_k1_gig_workaround_hv - K1 Si workaround
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at
 *  1Gig.  If link is down, the function will restore the default K1
 *  setting located in the NVM.
 **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
    s32 ret_val = E1000_SUCCESS;
    u16 status_reg = 0;
    bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

    DEBUGFUNC("e1000_k1_gig_workaround_hv");

    if (hw->mac.type != e1000_pchlan)
        goto out;

    /* Wrap the whole flow with the sw flag */
    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        goto out;

    /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
    if (link) {
        if (hw->phy.type == e1000_phy_82578) {
            ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
                                                  &status_reg);
            if (ret_val)
                goto release;

            status_reg &= BM_CS_STATUS_LINK_UP |
                          BM_CS_STATUS_RESOLVED |
                          BM_CS_STATUS_SPEED_MASK;

            if (status_reg == (BM_CS_STATUS_LINK_UP |
                               BM_CS_STATUS_RESOLVED |
                               BM_CS_STATUS_SPEED_1000))
                k1_enable = FALSE;
        }

        if (hw->phy.type == e1000_phy_82577) {
            ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
                                                  &status_reg);
            if (ret_val)
                goto release;

            status_reg &= HV_M_STATUS_LINK_UP |
                          HV_M_STATUS_AUTONEG_COMPLETE |
                          HV_M_STATUS_SPEED_MASK;

            if (status_reg == (HV_M_STATUS_LINK_UP |
                               HV_M_STATUS_AUTONEG_COMPLETE |
                               HV_M_STATUS_SPEED_1000))
                k1_enable = FALSE;
        }

        /* Link stall fix for link up */
        ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
                                               0x0100);
        if (ret_val)
            goto release;

    } else {
        /* Link stall fix for link down */
        ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
                                               0x4100);
        if (ret_val)
            goto release;
    }

    ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
    hw->phy.ops.release(hw);
out:
    return ret_val;
}

/**
 *  e1000_configure_k1_ich8lan - Configure K1 power state
 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
    s32 ret_val = E1000_SUCCESS;
    u32 ctrl_reg = 0;
    u32 ctrl_ext = 0;
    u32 reg = 0;
    u16 kmrn_reg = 0;

    ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
                                         &kmrn_reg);
    if (ret_val)
        goto out;

    if (k1_enable)
        kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
    else
        kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

    ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
                                          kmrn_reg);
    if (ret_val)
        goto out;

    usec_delay(20);
    ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
    ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);

    reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
    reg |= E1000_CTRL_FRCSPD;
    E1000_WRITE_REG(hw, E1000_CTRL, reg);

    E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
    usec_delay(20);
    E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
    E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
    usec_delay(20);

out:
    return ret_val;
}
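/*
 * Sequence note (illustrative, not part of the original sources): callers of
 * e1000_configure_k1_ich8lan() hold the software flag (the KMRN accessors it
 * uses are the *_locked variants) and typically follow the pattern
 *
 *     e1000_configure_k1_ich8lan(hw, FALSE);  // disable K1 while at 1 Gb/s
 *     e1000_configure_k1_ich8lan(hw, TRUE);   // restore the NVM default
 *
 * as e1000_k1_gig_workaround_hv() above does based on the resolved link
 * speed.
 */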
/**
 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 *  @hw: pointer to the HW structure
 *  @d0_state: boolean if entering d0 or d3 device state
 *
 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits
 *  are collectively called OEM bits.  The OEM Write Enable bit and SW Config
 *  bit in NVM determine whether HW should configure LPLU and Gbe Disable.
 **/
s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
    s32 ret_val = 0;
    u32 mac_reg;
    u16 oem_reg;

    if (hw->mac.type != e1000_pchlan)
        return ret_val;

    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        return ret_val;

    mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
    if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
        goto out;

    mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
    if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
        goto out;

    mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

    ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
    if (ret_val)
        goto out;

    oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

    if (d0_state) {
        if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
            oem_reg |= HV_OEM_BITS_GBE_DIS;

        if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
            oem_reg |= HV_OEM_BITS_LPLU;
    } else {
        if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
            oem_reg |= HV_OEM_BITS_GBE_DIS;

        if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
            oem_reg |= HV_OEM_BITS_LPLU;
    }
    /* Restart auto-neg to activate the bits */
    if (!hw->phy.ops.check_reset_block(hw))
        oem_reg |= HV_OEM_BITS_RESTART_AN;
    ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

out:
    hw->phy.ops.release(hw);

    return ret_val;
}


/**
 *  e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
 *  @hw: pointer to the HW structure
 **/
s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
    if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
        return E1000_SUCCESS;

    return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
}

/**
 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
{
    s32 ret_val;
    u16 data;

    ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
    if (ret_val)
        return ret_val;

    data |= HV_KMRN_MDIO_SLOW;

    ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);

    return ret_val;
}

/**
 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 **/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
    s32 ret_val = E1000_SUCCESS;
    u16 phy_data;

    if (hw->mac.type != e1000_pchlan)
        goto out;

    /* Set MDIO slow mode before any other MDIO access */
    if (hw->phy.type == e1000_phy_82577) {
        ret_val = e1000_set_mdio_slow_mode_hv(hw);
        if (ret_val)
            goto out;
    }

    /* Hanksville M Phy init for IEEE. */
    if ((hw->revision_id == 2) &&
        (hw->phy.type == e1000_phy_82577) &&
        ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
        hw->phy.ops.write_reg(hw, 0x10, 0x8823);
        hw->phy.ops.write_reg(hw, 0x11, 0x0018);
        hw->phy.ops.write_reg(hw, 0x10, 0x8824);
        hw->phy.ops.write_reg(hw, 0x11, 0x0016);
        hw->phy.ops.write_reg(hw, 0x10, 0x8825);
        hw->phy.ops.write_reg(hw, 0x11, 0x001A);
        hw->phy.ops.write_reg(hw, 0x10, 0x888C);
        hw->phy.ops.write_reg(hw, 0x11, 0x0007);
        hw->phy.ops.write_reg(hw, 0x10, 0x888D);
        hw->phy.ops.write_reg(hw, 0x11, 0x0007);
        hw->phy.ops.write_reg(hw, 0x10, 0x888E);
        hw->phy.ops.write_reg(hw, 0x11, 0x0007);
        hw->phy.ops.write_reg(hw, 0x10, 0x8827);
        hw->phy.ops.write_reg(hw, 0x11, 0x0001);
        hw->phy.ops.write_reg(hw, 0x10, 0x8835);
        hw->phy.ops.write_reg(hw, 0x11, 0x0001);
        hw->phy.ops.write_reg(hw, 0x10, 0x8834);
        hw->phy.ops.write_reg(hw, 0x11, 0x0001);
        hw->phy.ops.write_reg(hw, 0x10, 0x8833);
        hw->phy.ops.write_reg(hw, 0x11, 0x0002);
    }

    if (((hw->phy.type == e1000_phy_82577) &&
         ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
        ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
        /* Disable generation of early preamble */
        ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
        if (ret_val)
            goto out;

        /* Preamble tuning for SSC */
        ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
        if (ret_val)
            goto out;
    }

    if (hw->phy.type == e1000_phy_82578) {
        if (hw->revision_id < 3) {
            /* PHY config */
            ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
                                            0x66C0);
            if (ret_val)
                goto out;

            /* PHY config */
            ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
                                            0xFFFF);
            if (ret_val)
                goto out;
        }

        /*
         * Return registers to default by doing a soft reset then
         * writing 0x3140 to the control register.
         */
        if (hw->phy.revision < 2) {
            e1000_phy_sw_reset_generic(hw);
            ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, 0x3140);
        }
    }

    if ((hw->revision_id == 2) &&
        (hw->phy.type == e1000_phy_82577) &&
        ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
        /*
         * Workaround for OEM (GbE) not operating after reset -
         * restart AN (twice)
         */
        ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
        if (ret_val)
            goto out;
        ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
        if (ret_val)
            goto out;
    }

    /* Select page 0 */
    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        goto out;

    hw->phy.addr = 1;
    ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
    hw->phy.ops.release(hw);
    if (ret_val)
        goto out;

    /*
     * Configure the K1 Si workaround during phy reset assuming there is
     * link so that it disables K1 if link is in 1Gbps.
     */
    ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
    if (ret_val)
        goto out;

    /* Workaround for link disconnects on a busy hub in half duplex */
    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        goto out;
    ret_val = hw->phy.ops.read_reg_locked(hw,
                                          PHY_REG(BM_PORT_CTRL_PAGE, 17),
                                          &phy_data);
    if (ret_val)
        goto release;
    ret_val = hw->phy.ops.write_reg_locked(hw,
                                           PHY_REG(BM_PORT_CTRL_PAGE, 17),
                                           phy_data & 0x00FF);
release:
    hw->phy.ops.release(hw);
out:
    return ret_val;
}

/**
 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
 *  @hw: pointer to the HW structure
 *
 *  Check the appropriate indication the MAC has finished configuring the
 *  PHY after a software reset.
 **/
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
{
    u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;

    DEBUGFUNC("e1000_lan_init_done_ich8lan");

    /* Wait for basic configuration to complete before proceeding */
    do {
        data = E1000_READ_REG(hw, E1000_STATUS);
        data &= E1000_STATUS_LAN_INIT_DONE;
        usec_delay(100);
    } while ((!data) && --loop);

    /*
     * If basic configuration is incomplete before the above loop
     * count reaches 0, loading the configuration from NVM will
     * leave the PHY in a bad state possibly resulting in no link.
     */
    if (loop == 0)
        DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");

    /* Clear the Init Done bit for the next init event */
    data = E1000_READ_REG(hw, E1000_STATUS);
    data &= ~E1000_STATUS_LAN_INIT_DONE;
    E1000_WRITE_REG(hw, E1000_STATUS, data);
}

/**
 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Resets the PHY
 *  This is a function pointer entry point called by drivers
 *  or other shared routines.
 **/
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
    s32 ret_val = E1000_SUCCESS;
    u16 reg;

    DEBUGFUNC("e1000_phy_hw_reset_ich8lan");

    ret_val = e1000_phy_hw_reset_generic(hw);
    if (ret_val)
        goto out;

    /* Allow time for h/w to get to a quiescent state after reset */
    msec_delay(10);

    /* Perform any necessary post-reset workarounds */
    switch (hw->mac.type) {
    case e1000_pchlan:
        ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
        if (ret_val)
            goto out;
        break;
    default:
        break;
    }

    /* Dummy read to clear the phy wakeup bit after lcd reset */
    if (hw->mac.type == e1000_pchlan)
        hw->phy.ops.read_reg(hw, BM_WUC, &reg);

    /* Configure the LCD with the extended configuration region in NVM */
    ret_val = e1000_sw_lcd_config_ich8lan(hw);
    if (ret_val)
        goto out;

    /* Configure the LCD with the OEM bits in NVM */
    ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);

out:
    return ret_val;
}
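/*
 * Ordering note (illustrative, not part of the original sources): after a
 * PHY reset on PCH parts the recovery steps in e1000_phy_hw_reset_ich8lan()
 * run in a fixed order:
 *
 *     e1000_hv_phy_workarounds_ich8lan(hw);     // post-reset PHY fix-ups
 *     e1000_sw_lcd_config_ich8lan(hw);          // replay NVM extended config
 *     e1000_oem_bits_config_ich8lan(hw, TRUE);  // apply OEM (LPLU/GbE) bits
 *
 * The dummy BM_WUC read in between clears the PHY wakeup bit after the LCD
 * reset.
 */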
D3 and D0 LPLU will call the same function 1388 * since it configures the same bit. 1389 **/ 1390 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) 1391 { 1392 s32 ret_val = E1000_SUCCESS; 1393 u16 oem_reg; 1394 1395 DEBUGFUNC("e1000_set_lplu_state_pchlan"); 1396 1397 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg); 1398 if (ret_val) 1399 goto out; 1400 1401 if (active) 1402 oem_reg |= HV_OEM_BITS_LPLU; 1403 else 1404 oem_reg &= ~HV_OEM_BITS_LPLU; 1405 1406 oem_reg |= HV_OEM_BITS_RESTART_AN; 1407 ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg); 1408 1409 out: 1410 return ret_val; 1411 } 1412 1413 /** 1414 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state 1415 * @hw: pointer to the HW structure 1416 * @active: TRUE to enable LPLU, FALSE to disable 1417 * 1418 * Sets the LPLU D0 state according to the active flag. When 1419 * activating LPLU this function also disables smart speed 1420 * and vice versa. LPLU will not be activated unless the 1421 * device autonegotiation advertisement meets standards of 1422 * either 10 or 10/100 or 10/100/1000 at all duplexes. 1423 * This is a function pointer entry point only called by 1424 * PHY setup routines. 1425 **/ 1426 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 1427 { 1428 struct e1000_phy_info *phy = &hw->phy; 1429 u32 phy_ctrl; 1430 s32 ret_val = E1000_SUCCESS; 1431 u16 data; 1432 1433 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); 1434 1435 if (phy->type == e1000_phy_ife) 1436 goto out; 1437 1438 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 1439 1440 if (active) { 1441 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 1442 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 1443 1444 if (phy->type != e1000_phy_igp_3) 1445 goto out; 1446 1447 /* 1448 * Call gig speed drop workaround on LPLU before accessing 1449 * any PHY registers 1450 */ 1451 if (hw->mac.type == e1000_ich8lan) 1452 e1000_gig_downshift_workaround_ich8lan(hw); 1453 1454 /* When LPLU is enabled, we should disable SmartSpeed */ 1455 ret_val = phy->ops.read_reg(hw, 1456 IGP01E1000_PHY_PORT_CONFIG, 1457 &data); 1458 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1459 ret_val = phy->ops.write_reg(hw, 1460 IGP01E1000_PHY_PORT_CONFIG, 1461 data); 1462 if (ret_val) 1463 goto out; 1464 } else { 1465 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 1466 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 1467 1468 if (phy->type != e1000_phy_igp_3) 1469 goto out; 1470 1471 /* 1472 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 1473 * during Dx states where the power conservation is most 1474 * important. During driver activity we should enable 1475 * SmartSpeed, so performance is maintained. 
1476 */ 1477 if (phy->smart_speed == e1000_smart_speed_on) { 1478 ret_val = phy->ops.read_reg(hw, 1479 IGP01E1000_PHY_PORT_CONFIG, 1480 &data); 1481 if (ret_val) 1482 goto out; 1483 1484 data |= IGP01E1000_PSCFR_SMART_SPEED; 1485 ret_val = phy->ops.write_reg(hw, 1486 IGP01E1000_PHY_PORT_CONFIG, 1487 data); 1488 if (ret_val) 1489 goto out; 1490 } else if (phy->smart_speed == e1000_smart_speed_off) { 1491 ret_val = phy->ops.read_reg(hw, 1492 IGP01E1000_PHY_PORT_CONFIG, 1493 &data); 1494 if (ret_val) 1495 goto out; 1496 1497 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1498 ret_val = phy->ops.write_reg(hw, 1499 IGP01E1000_PHY_PORT_CONFIG, 1500 data); 1501 if (ret_val) 1502 goto out; 1503 } 1504 } 1505 1506 out: 1507 return ret_val; 1508 } 1509 1510 /** 1511 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state 1512 * @hw: pointer to the HW structure 1513 * @active: TRUE to enable LPLU, FALSE to disable 1514 * 1515 * Sets the LPLU D3 state according to the active flag. When 1516 * activating LPLU this function also disables smart speed 1517 * and vice versa. LPLU will not be activated unless the 1518 * device autonegotiation advertisement meets standards of 1519 * either 10 or 10/100 or 10/100/1000 at all duplexes. 1520 * This is a function pointer entry point only called by 1521 * PHY setup routines. 1522 **/ 1523 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 1524 { 1525 struct e1000_phy_info *phy = &hw->phy; 1526 u32 phy_ctrl; 1527 s32 ret_val = E1000_SUCCESS; 1528 u16 data; 1529 1530 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); 1531 1532 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 1533 1534 if (!active) { 1535 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 1536 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 1537 1538 if (phy->type != e1000_phy_igp_3) 1539 goto out; 1540 1541 /* 1542 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 1543 * during Dx states where the power conservation is most 1544 * important. During driver activity we should enable 1545 * SmartSpeed, so performance is maintained. 
1546 */ 1547 if (phy->smart_speed == e1000_smart_speed_on) { 1548 ret_val = phy->ops.read_reg(hw, 1549 IGP01E1000_PHY_PORT_CONFIG, 1550 &data); 1551 if (ret_val) 1552 goto out; 1553 1554 data |= IGP01E1000_PSCFR_SMART_SPEED; 1555 ret_val = phy->ops.write_reg(hw, 1556 IGP01E1000_PHY_PORT_CONFIG, 1557 data); 1558 if (ret_val) 1559 goto out; 1560 } else if (phy->smart_speed == e1000_smart_speed_off) { 1561 ret_val = phy->ops.read_reg(hw, 1562 IGP01E1000_PHY_PORT_CONFIG, 1563 &data); 1564 if (ret_val) 1565 goto out; 1566 1567 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1568 ret_val = phy->ops.write_reg(hw, 1569 IGP01E1000_PHY_PORT_CONFIG, 1570 data); 1571 if (ret_val) 1572 goto out; 1573 } 1574 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || 1575 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 1576 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 1577 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 1578 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 1579 1580 if (phy->type != e1000_phy_igp_3) 1581 goto out; 1582 1583 /* 1584 * Call gig speed drop workaround on LPLU before accessing 1585 * any PHY registers 1586 */ 1587 if (hw->mac.type == e1000_ich8lan) 1588 e1000_gig_downshift_workaround_ich8lan(hw); 1589 1590 /* When LPLU is enabled, we should disable SmartSpeed */ 1591 ret_val = phy->ops.read_reg(hw, 1592 IGP01E1000_PHY_PORT_CONFIG, 1593 &data); 1594 if (ret_val) 1595 goto out; 1596 1597 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1598 ret_val = phy->ops.write_reg(hw, 1599 IGP01E1000_PHY_PORT_CONFIG, 1600 data); 1601 } 1602 1603 out: 1604 return ret_val; 1605 } 1606 1607 /** 1608 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 1609 * @hw: pointer to the HW structure 1610 * @bank: pointer to the variable that returns the active bank 1611 * 1612 * Reads signature byte from the NVM using the flash access registers. 1613 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
1614 **/ 1615 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) 1616 { 1617 u32 eecd; 1618 struct e1000_nvm_info *nvm = &hw->nvm; 1619 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); 1620 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; 1621 u8 sig_byte = 0; 1622 s32 ret_val = E1000_SUCCESS; 1623 1624 switch (hw->mac.type) { 1625 case e1000_ich8lan: 1626 case e1000_ich9lan: 1627 eecd = E1000_READ_REG(hw, E1000_EECD); 1628 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == 1629 E1000_EECD_SEC1VAL_VALID_MASK) { 1630 if (eecd & E1000_EECD_SEC1VAL) 1631 *bank = 1; 1632 else 1633 *bank = 0; 1634 1635 goto out; 1636 } 1637 DEBUGOUT("Unable to determine valid NVM bank via EEC - " 1638 "reading flash signature\n"); 1639 /* fall-thru */ 1640 default: 1641 /* set bank to 0 in case flash read fails */ 1642 *bank = 0; 1643 1644 /* Check bank 0 */ 1645 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, 1646 &sig_byte); 1647 if (ret_val) 1648 goto out; 1649 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 1650 E1000_ICH_NVM_SIG_VALUE) { 1651 *bank = 0; 1652 goto out; 1653 } 1654 1655 /* Check bank 1 */ 1656 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + 1657 bank1_offset, 1658 &sig_byte); 1659 if (ret_val) 1660 goto out; 1661 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 1662 E1000_ICH_NVM_SIG_VALUE) { 1663 *bank = 1; 1664 goto out; 1665 } 1666 1667 DEBUGOUT("ERROR: No valid NVM bank present\n"); 1668 ret_val = -E1000_ERR_NVM; 1669 break; 1670 } 1671 out: 1672 return ret_val; 1673 } 1674 1675 /** 1676 * e1000_read_nvm_ich8lan - Read word(s) from the NVM 1677 * @hw: pointer to the HW structure 1678 * @offset: The offset (in bytes) of the word(s) to read. 1679 * @words: Size of data to read in words 1680 * @data: Pointer to the word(s) to read at offset. 1681 * 1682 * Reads a word(s) from the NVM using the flash access registers. 1683 **/ 1684 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, 1685 u16 *data) 1686 { 1687 struct e1000_nvm_info *nvm = &hw->nvm; 1688 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 1689 u32 act_offset; 1690 s32 ret_val = E1000_SUCCESS; 1691 u32 bank = 0; 1692 u16 i, word; 1693 1694 DEBUGFUNC("e1000_read_nvm_ich8lan"); 1695 1696 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1697 (words == 0)) { 1698 DEBUGOUT("nvm parameter(s) out of bounds\n"); 1699 ret_val = -E1000_ERR_NVM; 1700 goto out; 1701 } 1702 1703 nvm->ops.acquire(hw); 1704 1705 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1706 if (ret_val != E1000_SUCCESS) { 1707 DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); 1708 bank = 0; 1709 } 1710 1711 act_offset = (bank) ? nvm->flash_bank_size : 0; 1712 act_offset += offset; 1713 1714 ret_val = E1000_SUCCESS; 1715 for (i = 0; i < words; i++) { 1716 if ((dev_spec->shadow_ram) && 1717 (dev_spec->shadow_ram[offset+i].modified)) { 1718 data[i] = dev_spec->shadow_ram[offset+i].value; 1719 } else { 1720 ret_val = e1000_read_flash_word_ich8lan(hw, 1721 act_offset + i, 1722 &word); 1723 if (ret_val) 1724 break; 1725 data[i] = word; 1726 } 1727 } 1728 1729 nvm->ops.release(hw); 1730 1731 out: 1732 if (ret_val) 1733 DEBUGOUT1("NVM read error: %d\n", ret_val); 1734 1735 return ret_val; 1736 } 1737 1738 /** 1739 * e1000_flash_cycle_init_ich8lan - Initialize flash 1740 * @hw: pointer to the HW structure 1741 * 1742 * This function does initial flash setup so that a new read/write/erase cycle 1743 * can be started. 
1744 **/ 1745 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) 1746 { 1747 union ich8_hws_flash_status hsfsts; 1748 s32 ret_val = -E1000_ERR_NVM; 1749 s32 i = 0; 1750 1751 DEBUGFUNC("e1000_flash_cycle_init_ich8lan"); 1752 1753 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 1754 1755 /* Check if the flash descriptor is valid */ 1756 if (hsfsts.hsf_status.fldesvalid == 0) { 1757 DEBUGOUT("Flash descriptor invalid. " 1758 "SW Sequencing must be used."); 1759 goto out; 1760 } 1761 1762 /* Clear FCERR and DAEL in hw status by writing 1 */ 1763 hsfsts.hsf_status.flcerr = 1; 1764 hsfsts.hsf_status.dael = 1; 1765 1766 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); 1767 1768 /* 1769 * Either we should have a hardware SPI cycle in progress 1770 * bit to check against, in order to start a new cycle or 1771 * FDONE bit should be changed in the hardware so that it 1772 * is 1 after hardware reset, which can then be used as an 1773 * indication whether a cycle is in progress or has been 1774 * completed. 1775 */ 1776 1777 if (hsfsts.hsf_status.flcinprog == 0) { 1778 /* 1779 * There is no cycle running at present, 1780 * so we can start a cycle. 1781 * Begin by setting Flash Cycle Done. 1782 */ 1783 hsfsts.hsf_status.flcdone = 1; 1784 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); 1785 ret_val = E1000_SUCCESS; 1786 } else { 1787 /* 1788 * Otherwise poll for sometime so the current 1789 * cycle has a chance to end before giving up. 1790 */ 1791 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 1792 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 1793 ICH_FLASH_HSFSTS); 1794 if (hsfsts.hsf_status.flcinprog == 0) { 1795 ret_val = E1000_SUCCESS; 1796 break; 1797 } 1798 usec_delay(1); 1799 } 1800 if (ret_val == E1000_SUCCESS) { 1801 /* 1802 * Successful in waiting for previous cycle to timeout, 1803 * now set the Flash Cycle Done. 1804 */ 1805 hsfsts.hsf_status.flcdone = 1; 1806 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, 1807 hsfsts.regval); 1808 } else { 1809 DEBUGOUT("Flash controller busy, cannot get access"); 1810 } 1811 } 1812 1813 out: 1814 return ret_val; 1815 } 1816 1817 /** 1818 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) 1819 * @hw: pointer to the HW structure 1820 * @timeout: maximum time to wait for completion 1821 * 1822 * This function starts a flash cycle and waits for its completion. 
1823 **/ 1824 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) 1825 { 1826 union ich8_hws_flash_ctrl hsflctl; 1827 union ich8_hws_flash_status hsfsts; 1828 s32 ret_val = -E1000_ERR_NVM; 1829 u32 i = 0; 1830 1831 DEBUGFUNC("e1000_flash_cycle_ich8lan"); 1832 1833 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 1834 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 1835 hsflctl.hsf_ctrl.flcgo = 1; 1836 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 1837 1838 /* wait till FDONE bit is set to 1 */ 1839 do { 1840 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 1841 if (hsfsts.hsf_status.flcdone == 1) 1842 break; 1843 usec_delay(1); 1844 } while (i++ < timeout); 1845 1846 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) 1847 ret_val = E1000_SUCCESS; 1848 1849 return ret_val; 1850 } 1851 1852 /** 1853 * e1000_read_flash_word_ich8lan - Read word from flash 1854 * @hw: pointer to the HW structure 1855 * @offset: offset to data location 1856 * @data: pointer to the location for storing the data 1857 * 1858 * Reads the flash word at offset into data. Offset is converted 1859 * to bytes before read. 1860 **/ 1861 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, 1862 u16 *data) 1863 { 1864 s32 ret_val; 1865 1866 DEBUGFUNC("e1000_read_flash_word_ich8lan"); 1867 1868 if (!data) { 1869 ret_val = -E1000_ERR_NVM; 1870 goto out; 1871 } 1872 1873 /* Must convert offset into bytes. */ 1874 offset <<= 1; 1875 1876 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data); 1877 1878 out: 1879 return ret_val; 1880 } 1881 1882 /** 1883 * e1000_read_flash_byte_ich8lan - Read byte from flash 1884 * @hw: pointer to the HW structure 1885 * @offset: The offset of the byte to read. 1886 * @data: Pointer to a byte to store the value read. 1887 * 1888 * Reads a single byte from the NVM using the flash access registers. 1889 **/ 1890 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 1891 u8 *data) 1892 { 1893 s32 ret_val = E1000_SUCCESS; 1894 u16 word = 0; 1895 1896 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); 1897 if (ret_val) 1898 goto out; 1899 1900 *data = (u8)word; 1901 1902 out: 1903 return ret_val; 1904 } 1905 1906 /** 1907 * e1000_read_flash_data_ich8lan - Read byte or word from NVM 1908 * @hw: pointer to the HW structure 1909 * @offset: The offset (in bytes) of the byte or word to read. 1910 * @size: Size of data to read, 1=byte 2=word 1911 * @data: Pointer to the word to store the value read. 1912 * 1913 * Reads a byte or word from the NVM using the flash access registers. 1914 **/ 1915 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 1916 u8 size, u16 *data) 1917 { 1918 union ich8_hws_flash_status hsfsts; 1919 union ich8_hws_flash_ctrl hsflctl; 1920 u32 flash_linear_addr; 1921 u32 flash_data = 0; 1922 s32 ret_val = -E1000_ERR_NVM; 1923 u8 count = 0; 1924 1925 DEBUGFUNC("e1000_read_flash_data_ich8lan"); 1926 1927 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 1928 goto out; 1929 1930 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + 1931 hw->nvm.flash_base_addr; 1932 1933 do { 1934 usec_delay(1); 1935 /* Steps */ 1936 ret_val = e1000_flash_cycle_init_ich8lan(hw); 1937 if (ret_val != E1000_SUCCESS) 1938 break; 1939 1940 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 1941 /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
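 * (i.e. size == 1 programs fldbcount = 0 and size == 2 programs
 * fldbcount = 1, hence the "size - 1" below)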
*/ 1942 hsflctl.hsf_ctrl.fldbcount = size - 1; 1943 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; 1944 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 1945 1946 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); 1947 1948 ret_val = e1000_flash_cycle_ich8lan(hw, 1949 ICH_FLASH_READ_COMMAND_TIMEOUT); 1950 1951 /* 1952 * Check if FCERR is set to 1, if set to 1, clear it 1953 * and try the whole sequence a few more times, else 1954 * read in (shift in) the Flash Data0, the order is 1955 * least significant byte first msb to lsb 1956 */ 1957 if (ret_val == E1000_SUCCESS) { 1958 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); 1959 if (size == 1) 1960 *data = (u8)(flash_data & 0x000000FF); 1961 else if (size == 2) 1962 *data = (u16)(flash_data & 0x0000FFFF); 1963 break; 1964 } else { 1965 /* 1966 * If we've gotten here, then things are probably 1967 * completely hosed, but if the error condition is 1968 * detected, it won't hurt to give it another try... 1969 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 1970 */ 1971 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 1972 ICH_FLASH_HSFSTS); 1973 if (hsfsts.hsf_status.flcerr == 1) { 1974 /* Repeat for some time before giving up. */ 1975 continue; 1976 } else if (hsfsts.hsf_status.flcdone == 0) { 1977 DEBUGOUT("Timeout error - flash cycle " 1978 "did not complete."); 1979 break; 1980 } 1981 } 1982 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 1983 1984 out: 1985 return ret_val; 1986 } 1987 1988 /** 1989 * e1000_write_nvm_ich8lan - Write word(s) to the NVM 1990 * @hw: pointer to the HW structure 1991 * @offset: The offset (in bytes) of the word(s) to write. 1992 * @words: Size of data to write in words 1993 * @data: Pointer to the word(s) to write at offset. 1994 * 1995 * Writes a byte or word to the NVM using the flash access registers. 1996 **/ 1997 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, 1998 u16 *data) 1999 { 2000 struct e1000_nvm_info *nvm = &hw->nvm; 2001 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 2002 s32 ret_val = E1000_SUCCESS; 2003 u16 i; 2004 2005 DEBUGFUNC("e1000_write_nvm_ich8lan"); 2006 2007 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 2008 (words == 0)) { 2009 DEBUGOUT("nvm parameter(s) out of bounds\n"); 2010 ret_val = -E1000_ERR_NVM; 2011 goto out; 2012 } 2013 2014 nvm->ops.acquire(hw); 2015 2016 for (i = 0; i < words; i++) { 2017 dev_spec->shadow_ram[offset+i].modified = TRUE; 2018 dev_spec->shadow_ram[offset+i].value = data[i]; 2019 } 2020 2021 nvm->ops.release(hw); 2022 2023 out: 2024 return ret_val; 2025 } 2026 2027 /** 2028 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM 2029 * @hw: pointer to the HW structure 2030 * 2031 * The NVM checksum is updated by calling the generic update_nvm_checksum, 2032 * which writes the checksum to the shadow ram. The changes in the shadow 2033 * ram are then committed to the EEPROM by processing each bank at a time 2034 * checking for the modified bit and writing only the pending changes. 2035 * After a successful commit, the shadow ram is cleared and is ready for 2036 * future writes. 
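 *
 * Commit flow (a summary of the code below): detect the currently valid
 * bank, erase the opposite bank, copy each shadow ram word into it (modified
 * entries take precedence over words read back from the old bank), write the
 * new bank's signature word (0x13) with bits 15:14 = 10b to mark it valid,
 * and finally clear the old bank's signature byte to invalidate it.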
2037 **/ 2038 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) 2039 { 2040 struct e1000_nvm_info *nvm = &hw->nvm; 2041 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 2042 u32 i, act_offset, new_bank_offset, old_bank_offset, bank; 2043 s32 ret_val; 2044 u16 data; 2045 2046 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); 2047 2048 ret_val = e1000_update_nvm_checksum_generic(hw); 2049 if (ret_val) 2050 goto out; 2051 2052 if (nvm->type != e1000_nvm_flash_sw) 2053 goto out; 2054 2055 nvm->ops.acquire(hw); 2056 2057 /* 2058 * We're writing to the opposite bank so if we're on bank 1, 2059 * write to bank 0 etc. We also need to erase the segment that 2060 * is going to be written 2061 */ 2062 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 2063 if (ret_val != E1000_SUCCESS) { 2064 DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); 2065 bank = 0; 2066 } 2067 2068 if (bank == 0) { 2069 new_bank_offset = nvm->flash_bank_size; 2070 old_bank_offset = 0; 2071 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 2072 if (ret_val) 2073 goto release; 2074 } else { 2075 old_bank_offset = nvm->flash_bank_size; 2076 new_bank_offset = 0; 2077 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 2078 if (ret_val) 2079 goto release; 2080 } 2081 2082 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 2083 /* 2084 * Determine whether to write the value stored 2085 * in the other NVM bank or a modified value stored 2086 * in the shadow RAM 2087 */ 2088 if (dev_spec->shadow_ram[i].modified) { 2089 data = dev_spec->shadow_ram[i].value; 2090 } else { 2091 ret_val = e1000_read_flash_word_ich8lan(hw, i + 2092 old_bank_offset, 2093 &data); 2094 if (ret_val) 2095 break; 2096 } 2097 2098 /* 2099 * If the word is 0x13, then make sure the signature bits 2100 * (15:14) are 11b until the commit has completed. 2101 * This will allow us to write 10b which indicates the 2102 * signature is valid. We want to do this after the write 2103 * has completed so that we don't mark the segment valid 2104 * while the write is still in progress 2105 */ 2106 if (i == E1000_ICH_NVM_SIG_WORD) 2107 data |= E1000_ICH_NVM_SIG_MASK; 2108 2109 /* Convert offset to bytes. */ 2110 act_offset = (i + new_bank_offset) << 1; 2111 2112 usec_delay(100); 2113 /* Write the bytes to the new bank. */ 2114 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 2115 act_offset, 2116 (u8)data); 2117 if (ret_val) 2118 break; 2119 2120 usec_delay(100); 2121 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 2122 act_offset + 1, 2123 (u8)(data >> 8)); 2124 if (ret_val) 2125 break; 2126 } 2127 2128 /* 2129 * Don't bother writing the segment valid bits if sector 2130 * programming failed. 2131 */ 2132 if (ret_val) { 2133 DEBUGOUT("Flash commit failed.\n"); 2134 goto release; 2135 } 2136 2137 /* 2138 * Finally validate the new segment by setting bit 15:14 2139 * to 10b in word 0x13 , this can be done without an 2140 * erase as well since these bits are 11 to start with 2141 * and we need to change bit 14 to 0b 2142 */ 2143 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 2144 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); 2145 if (ret_val) 2146 goto release; 2147 2148 data &= 0xBFFF; 2149 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 2150 act_offset * 2 + 1, 2151 (u8)(data >> 8)); 2152 if (ret_val) 2153 goto release; 2154 2155 /* 2156 * And invalidate the previously valid segment by setting 2157 * its signature word (0x13) high_byte to 0b. 
This can be 2158 * done without an erase because flash erase sets all bits 2159 * to 1's. We can write 1's to 0's without an erase 2160 */ 2161 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 2162 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 2163 if (ret_val) 2164 goto release; 2165 2166 /* Great! Everything worked, we can now clear the cached entries. */ 2167 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 2168 dev_spec->shadow_ram[i].modified = FALSE; 2169 dev_spec->shadow_ram[i].value = 0xFFFF; 2170 } 2171 2172 release: 2173 nvm->ops.release(hw); 2174 2175 /* 2176 * Reload the EEPROM, or else modifications will not appear 2177 * until after the next adapter reset. 2178 */ 2179 if (!ret_val) { 2180 nvm->ops.reload(hw); 2181 msec_delay(10); 2182 } 2183 2184 out: 2185 if (ret_val) 2186 DEBUGOUT1("NVM update error: %d\n", ret_val); 2187 2188 return ret_val; 2189 } 2190 2191 /** 2192 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum 2193 * @hw: pointer to the HW structure 2194 * 2195 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. 2196 * If the bit is 0, that the EEPROM had been modified, but the checksum was not 2197 * calculated, in which case we need to calculate the checksum and set bit 6. 2198 **/ 2199 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) 2200 { 2201 s32 ret_val = E1000_SUCCESS; 2202 u16 data; 2203 2204 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan"); 2205 2206 /* 2207 * Read 0x19 and check bit 6. If this bit is 0, the checksum 2208 * needs to be fixed. This bit is an indication that the NVM 2209 * was prepared by OEM software and did not calculate the 2210 * checksum...a likely scenario. 2211 */ 2212 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data); 2213 if (ret_val) 2214 goto out; 2215 2216 if ((data & 0x40) == 0) { 2217 data |= 0x40; 2218 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data); 2219 if (ret_val) 2220 goto out; 2221 ret_val = hw->nvm.ops.update(hw); 2222 if (ret_val) 2223 goto out; 2224 } 2225 2226 ret_val = e1000_validate_nvm_checksum_generic(hw); 2227 2228 out: 2229 return ret_val; 2230 } 2231 2232 /** 2233 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM 2234 * @hw: pointer to the HW structure 2235 * @offset: The offset (in bytes) of the byte/word to read. 2236 * @size: Size of data to read, 1=byte 2=word 2237 * @data: The byte(s) to write to the NVM. 2238 * 2239 * Writes one/two bytes to the NVM using the flash access registers. 2240 **/ 2241 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 2242 u8 size, u16 data) 2243 { 2244 union ich8_hws_flash_status hsfsts; 2245 union ich8_hws_flash_ctrl hsflctl; 2246 u32 flash_linear_addr; 2247 u32 flash_data = 0; 2248 s32 ret_val = -E1000_ERR_NVM; 2249 u8 count = 0; 2250 2251 DEBUGFUNC("e1000_write_ich8_data"); 2252 2253 if (size < 1 || size > 2 || data > size * 0xff || 2254 offset > ICH_FLASH_LINEAR_ADDR_MASK) 2255 goto out; 2256 2257 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + 2258 hw->nvm.flash_base_addr; 2259 2260 do { 2261 usec_delay(1); 2262 /* Steps */ 2263 ret_val = e1000_flash_cycle_init_ich8lan(hw); 2264 if (ret_val != E1000_SUCCESS) 2265 break; 2266 2267 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 2268 /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
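 * (the same "size - 1" encoding as in the read path; for a write the data is
 * then placed in FDATA0 before the cycle is launched with ICH_CYCLE_WRITE)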
*/ 2269 hsflctl.hsf_ctrl.fldbcount = size - 1; 2270 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; 2271 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 2272 2273 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); 2274 2275 if (size == 1) 2276 flash_data = (u32)data & 0x00FF; 2277 else 2278 flash_data = (u32)data; 2279 2280 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); 2281 2282 /* 2283 * check if FCERR is set to 1 , if set to 1, clear it 2284 * and try the whole sequence a few more times else done 2285 */ 2286 ret_val = e1000_flash_cycle_ich8lan(hw, 2287 ICH_FLASH_WRITE_COMMAND_TIMEOUT); 2288 if (ret_val == E1000_SUCCESS) 2289 break; 2290 2291 /* 2292 * If we're here, then things are most likely 2293 * completely hosed, but if the error condition 2294 * is detected, it won't hurt to give it another 2295 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 2296 */ 2297 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 2298 if (hsfsts.hsf_status.flcerr == 1) 2299 /* Repeat for some time before giving up. */ 2300 continue; 2301 if (hsfsts.hsf_status.flcdone == 0) { 2302 DEBUGOUT("Timeout error - flash cycle " 2303 "did not complete."); 2304 break; 2305 } 2306 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 2307 2308 out: 2309 return ret_val; 2310 } 2311 2312 /** 2313 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM 2314 * @hw: pointer to the HW structure 2315 * @offset: The index of the byte to read. 2316 * @data: The byte to write to the NVM. 2317 * 2318 * Writes a single byte to the NVM using the flash access registers. 2319 **/ 2320 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 2321 u8 data) 2322 { 2323 u16 word = (u16)data; 2324 2325 DEBUGFUNC("e1000_write_flash_byte_ich8lan"); 2326 2327 return e1000_write_flash_data_ich8lan(hw, offset, 1, word); 2328 } 2329 2330 /** 2331 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM 2332 * @hw: pointer to the HW structure 2333 * @offset: The offset of the byte to write. 2334 * @byte: The byte to write to the NVM. 2335 * 2336 * Writes a single byte to the NVM using the flash access registers. 2337 * Goes through a retry algorithm before giving up. 2338 **/ 2339 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 2340 u32 offset, u8 byte) 2341 { 2342 s32 ret_val; 2343 u16 program_retries; 2344 2345 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); 2346 2347 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 2348 if (ret_val == E1000_SUCCESS) 2349 goto out; 2350 2351 for (program_retries = 0; program_retries < 100; program_retries++) { 2352 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); 2353 usec_delay(100); 2354 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 2355 if (ret_val == E1000_SUCCESS) 2356 break; 2357 } 2358 if (program_retries == 100) { 2359 ret_val = -E1000_ERR_NVM; 2360 goto out; 2361 } 2362 2363 out: 2364 return ret_val; 2365 } 2366 2367 /** 2368 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM 2369 * @hw: pointer to the HW structure 2370 * @bank: 0 for first bank, 1 for second bank, etc. 2371 * 2372 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. 2373 * bank N is 4096 * N + flash_reg_addr. 
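 *
 * For illustration, with the 4k bank described above the BERASE field in
 * HSFSTS maps to:
 *	00b - 256 byte sectors, 16 erase cycles per bank
 *	01b - 4K sectors, 1 erase cycle per bank
 *	10b - 8K sectors, 1 erase cycle (ICH9 only)
 *	11b - 64K sectors, 1 erase cycle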
2374 **/ 2375 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) 2376 { 2377 struct e1000_nvm_info *nvm = &hw->nvm; 2378 union ich8_hws_flash_status hsfsts; 2379 union ich8_hws_flash_ctrl hsflctl; 2380 u32 flash_linear_addr; 2381 /* bank size is in 16bit words - adjust to bytes */ 2382 u32 flash_bank_size = nvm->flash_bank_size * 2; 2383 s32 ret_val = E1000_SUCCESS; 2384 s32 count = 0; 2385 s32 j, iteration, sector_size; 2386 2387 DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); 2388 2389 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 2390 2391 /* 2392 * Determine HW Sector size: Read BERASE bits of hw flash status 2393 * register 2394 * 00: The Hw sector is 256 bytes, hence we need to erase 16 2395 * consecutive sectors. The start index for the nth Hw sector 2396 * can be calculated as = bank * 4096 + n * 256 2397 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 2398 * The start index for the nth Hw sector can be calculated 2399 * as = bank * 4096 2400 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 2401 * (ich9 only, otherwise error condition) 2402 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 2403 */ 2404 switch (hsfsts.hsf_status.berasesz) { 2405 case 0: 2406 /* Hw sector size 256 */ 2407 sector_size = ICH_FLASH_SEG_SIZE_256; 2408 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; 2409 break; 2410 case 1: 2411 sector_size = ICH_FLASH_SEG_SIZE_4K; 2412 iteration = 1; 2413 break; 2414 case 2: 2415 sector_size = ICH_FLASH_SEG_SIZE_8K; 2416 iteration = 1; 2417 break; 2418 case 3: 2419 sector_size = ICH_FLASH_SEG_SIZE_64K; 2420 iteration = 1; 2421 break; 2422 default: 2423 ret_val = -E1000_ERR_NVM; 2424 goto out; 2425 } 2426 2427 /* Start with the base address, then add the sector offset. */ 2428 flash_linear_addr = hw->nvm.flash_base_addr; 2429 flash_linear_addr += (bank) ? flash_bank_size : 0; 2430 2431 for (j = 0; j < iteration ; j++) { 2432 do { 2433 /* Steps */ 2434 ret_val = e1000_flash_cycle_init_ich8lan(hw); 2435 if (ret_val) 2436 goto out; 2437 2438 /* 2439 * Write a value 11 (block Erase) in Flash 2440 * Cycle field in hw flash control 2441 */ 2442 hsflctl.regval = E1000_READ_FLASH_REG16(hw, 2443 ICH_FLASH_HSFCTL); 2444 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 2445 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, 2446 hsflctl.regval); 2447 2448 /* 2449 * Write the last 24 bits of an index within the 2450 * block into Flash Linear address field in Flash 2451 * Address. 2452 */ 2453 flash_linear_addr += (j * sector_size); 2454 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, 2455 flash_linear_addr); 2456 2457 ret_val = e1000_flash_cycle_ich8lan(hw, 2458 ICH_FLASH_ERASE_COMMAND_TIMEOUT); 2459 if (ret_val == E1000_SUCCESS) 2460 break; 2461 2462 /* 2463 * Check if FCERR is set to 1. If 1, 2464 * clear it and try the whole sequence 2465 * a few more times else Done 2466 */ 2467 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 2468 ICH_FLASH_HSFSTS); 2469 if (hsfsts.hsf_status.flcerr == 1) 2470 /* repeat for some time before giving up */ 2471 continue; 2472 else if (hsfsts.hsf_status.flcdone == 0) 2473 goto out; 2474 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); 2475 } 2476 2477 out: 2478 return ret_val; 2479 } 2480 2481 /** 2482 * e1000_valid_led_default_ich8lan - Set the default LED settings 2483 * @hw: pointer to the HW structure 2484 * @data: Pointer to the LED settings 2485 * 2486 * Reads the LED default settings from the NVM to data. 
If the NVM LED 2487 * settings is all 0's or F's, set the LED default to a valid LED default 2488 * setting. 2489 **/ 2490 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) 2491 { 2492 s32 ret_val; 2493 2494 DEBUGFUNC("e1000_valid_led_default_ich8lan"); 2495 2496 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); 2497 if (ret_val) { 2498 DEBUGOUT("NVM Read Error\n"); 2499 goto out; 2500 } 2501 2502 if (*data == ID_LED_RESERVED_0000 || 2503 *data == ID_LED_RESERVED_FFFF) 2504 *data = ID_LED_DEFAULT_ICH8LAN; 2505 2506 out: 2507 return ret_val; 2508 } 2509 2510 /** 2511 * e1000_id_led_init_pchlan - store LED configurations 2512 * @hw: pointer to the HW structure 2513 * 2514 * PCH does not control LEDs via the LEDCTL register, rather it uses 2515 * the PHY LED configuration register. 2516 * 2517 * PCH also does not have an "always on" or "always off" mode which 2518 * complicates the ID feature. Instead of using the "on" mode to indicate 2519 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()), 2520 * use "link_up" mode. The LEDs will still ID on request if there is no 2521 * link based on logic in e1000_led_[on|off]_pchlan(). 2522 **/ 2523 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) 2524 { 2525 struct e1000_mac_info *mac = &hw->mac; 2526 s32 ret_val; 2527 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; 2528 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; 2529 u16 data, i, temp, shift; 2530 2531 DEBUGFUNC("e1000_id_led_init_pchlan"); 2532 2533 /* Get default ID LED modes */ 2534 ret_val = hw->nvm.ops.valid_led_default(hw, &data); 2535 if (ret_val) 2536 goto out; 2537 2538 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); 2539 mac->ledctl_mode1 = mac->ledctl_default; 2540 mac->ledctl_mode2 = mac->ledctl_default; 2541 2542 for (i = 0; i < 4; i++) { 2543 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; 2544 shift = (i * 5); 2545 switch (temp) { 2546 case ID_LED_ON1_DEF2: 2547 case ID_LED_ON1_ON2: 2548 case ID_LED_ON1_OFF2: 2549 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 2550 mac->ledctl_mode1 |= (ledctl_on << shift); 2551 break; 2552 case ID_LED_OFF1_DEF2: 2553 case ID_LED_OFF1_ON2: 2554 case ID_LED_OFF1_OFF2: 2555 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 2556 mac->ledctl_mode1 |= (ledctl_off << shift); 2557 break; 2558 default: 2559 /* Do nothing */ 2560 break; 2561 } 2562 switch (temp) { 2563 case ID_LED_DEF1_ON2: 2564 case ID_LED_ON1_ON2: 2565 case ID_LED_OFF1_ON2: 2566 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 2567 mac->ledctl_mode2 |= (ledctl_on << shift); 2568 break; 2569 case ID_LED_DEF1_OFF2: 2570 case ID_LED_ON1_OFF2: 2571 case ID_LED_OFF1_OFF2: 2572 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 2573 mac->ledctl_mode2 |= (ledctl_off << shift); 2574 break; 2575 default: 2576 /* Do nothing */ 2577 break; 2578 } 2579 } 2580 2581 out: 2582 return ret_val; 2583 } 2584 2585 /** 2586 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width 2587 * @hw: pointer to the HW structure 2588 * 2589 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability 2590 * register, so the the bus width is hard coded. 2591 **/ 2592 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) 2593 { 2594 struct e1000_bus_info *bus = &hw->bus; 2595 s32 ret_val; 2596 2597 DEBUGFUNC("e1000_get_bus_info_ich8lan"); 2598 2599 ret_val = e1000_get_bus_info_pcie_generic(hw); 2600 2601 /* 2602 * ICH devices are "PCI Express"-ish. 
They have 2603 * a configuration space, but do not contain 2604 * PCI Express Capability registers, so bus width 2605 * must be hardcoded. 2606 */ 2607 if (bus->width == e1000_bus_width_unknown) 2608 bus->width = e1000_bus_width_pcie_x1; 2609 2610 return ret_val; 2611 } 2612 2613 /** 2614 * e1000_reset_hw_ich8lan - Reset the hardware 2615 * @hw: pointer to the HW structure 2616 * 2617 * Does a full reset of the hardware which includes a reset of the PHY and 2618 * MAC. 2619 **/ 2620 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 2621 { 2622 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 2623 u16 reg; 2624 u32 ctrl, icr, kab; 2625 s32 ret_val; 2626 2627 DEBUGFUNC("e1000_reset_hw_ich8lan"); 2628 2629 /* 2630 * Prevent the PCI-E bus from sticking if there is no TLP connection 2631 * on the last TLP read/write transaction when MAC is reset. 2632 */ 2633 ret_val = e1000_disable_pcie_master_generic(hw); 2634 if (ret_val) 2635 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 2636 2637 DEBUGOUT("Masking off all interrupts\n"); 2638 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 2639 2640 /* 2641 * Disable the Transmit and Receive units. Then delay to allow 2642 * any pending transactions to complete before we hit the MAC 2643 * with the global reset. 2644 */ 2645 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2646 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); 2647 E1000_WRITE_FLUSH(hw); 2648 2649 msec_delay(10); 2650 2651 /* Workaround for ICH8 bit corruption issue in FIFO memory */ 2652 if (hw->mac.type == e1000_ich8lan) { 2653 /* Set Tx and Rx buffer allocation to 8k apiece. */ 2654 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K); 2655 /* Set Packet Buffer Size to 16k. */ 2656 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K); 2657 } 2658 2659 if (hw->mac.type == e1000_pchlan) { 2660 /* Save the NVM K1 bit setting*/ 2661 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); 2662 if (ret_val) 2663 return ret_val; 2664 2665 if (reg & E1000_NVM_K1_ENABLE) 2666 dev_spec->nvm_k1_enabled = TRUE; 2667 else 2668 dev_spec->nvm_k1_enabled = FALSE; 2669 } 2670 2671 ctrl = E1000_READ_REG(hw, E1000_CTRL); 2672 2673 if (!hw->phy.ops.check_reset_block(hw) && !hw->phy.reset_disable) { 2674 /* Clear PHY Reset Asserted bit */ 2675 if (hw->mac.type >= e1000_pchlan) { 2676 u32 status = E1000_READ_REG(hw, E1000_STATUS); 2677 E1000_WRITE_REG(hw, E1000_STATUS, status & 2678 ~E1000_STATUS_PHYRA); 2679 } 2680 2681 /* 2682 * PHY HW reset requires MAC CORE reset at the same 2683 * time to make sure the interface between MAC and the 2684 * external PHY is reset. 2685 */ 2686 ctrl |= E1000_CTRL_PHY_RST; 2687 } 2688 ret_val = e1000_acquire_swflag_ich8lan(hw); 2689 DEBUGOUT("Issuing a global reset to ich8lan\n"); 2690 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST)); 2691 msec_delay(20); 2692 2693 if (!ret_val) 2694 e1000_release_swflag_ich8lan(hw); 2695 2696 /* Perform any necessary post-reset workarounds */ 2697 switch (hw->mac.type) { 2698 case e1000_pchlan: 2699 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 2700 if (ret_val) 2701 goto out; 2702 break; 2703 default: 2704 break; 2705 } 2706 2707 if (ctrl & E1000_CTRL_PHY_RST) 2708 ret_val = hw->phy.ops.get_cfg_done(hw); 2709 2710 if (hw->mac.type >= e1000_ich10lan) { 2711 e1000_lan_init_done_ich8lan(hw); 2712 } else { 2713 ret_val = e1000_get_auto_rd_done_generic(hw); 2714 if (ret_val) { 2715 /* 2716 * When auto config read does not complete, do not 2717 * return with an error. 
This can happen in situations 2718 * where there is no eeprom and prevents getting link. 2719 */ 2720 DEBUGOUT("Auto Read Done did not complete\n"); 2721 } 2722 } 2723 /* Dummy read to clear the phy wakeup bit after lcd reset */ 2724 if (hw->mac.type == e1000_pchlan) 2725 hw->phy.ops.read_reg(hw, BM_WUC, ®); 2726 2727 ret_val = e1000_sw_lcd_config_ich8lan(hw); 2728 if (ret_val) 2729 goto out; 2730 2731 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE); 2732 if (ret_val) 2733 goto out; 2734 /* 2735 * For PCH, this write will make sure that any noise 2736 * will be detected as a CRC error and be dropped rather than show up 2737 * as a bad packet to the DMA engine. 2738 */ 2739 if (hw->mac.type == e1000_pchlan) 2740 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565); 2741 2742 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 2743 icr = E1000_READ_REG(hw, E1000_ICR); 2744 2745 kab = E1000_READ_REG(hw, E1000_KABGTXD); 2746 kab |= E1000_KABGTXD_BGSQLBIAS; 2747 E1000_WRITE_REG(hw, E1000_KABGTXD, kab); 2748 2749 out: 2750 return ret_val; 2751 } 2752 2753 /** 2754 * e1000_init_hw_ich8lan - Initialize the hardware 2755 * @hw: pointer to the HW structure 2756 * 2757 * Prepares the hardware for transmit and receive by doing the following: 2758 * - initialize hardware bits 2759 * - initialize LED identification 2760 * - setup receive address registers 2761 * - setup flow control 2762 * - setup transmit descriptors 2763 * - clear statistics 2764 **/ 2765 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) 2766 { 2767 struct e1000_mac_info *mac = &hw->mac; 2768 u32 ctrl_ext, txdctl, snoop; 2769 s32 ret_val; 2770 u16 i; 2771 2772 DEBUGFUNC("e1000_init_hw_ich8lan"); 2773 2774 e1000_initialize_hw_bits_ich8lan(hw); 2775 2776 /* Initialize identification LED */ 2777 ret_val = mac->ops.id_led_init(hw); 2778 if (ret_val) 2779 DEBUGOUT("Error initializing identification LED\n"); 2780 /* This is not fatal and we should not stop init due to this */ 2781 2782 /* Setup the receive address. */ 2783 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); 2784 2785 /* Zero out the Multicast HASH table */ 2786 DEBUGOUT("Zeroing the MTA\n"); 2787 for (i = 0; i < mac->mta_reg_count; i++) 2788 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 2789 2790 /* 2791 * The 82578 Rx buffer will stall if wakeup is enabled in host and 2792 * the ME. Reading the BM_WUC register will clear the host wakeup bit. 2793 * Reset the phy after disabling host wakeup to reset the Rx buffer. 2794 */ 2795 if (hw->phy.type == e1000_phy_82578) { 2796 hw->phy.ops.read_reg(hw, BM_WUC, &i); 2797 ret_val = e1000_phy_hw_reset_ich8lan(hw); 2798 if (ret_val) 2799 return ret_val; 2800 } 2801 2802 /* Setup link and flow control */ 2803 ret_val = mac->ops.setup_link(hw); 2804 2805 /* Set the transmit descriptor write-back policy for both queues */ 2806 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); 2807 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 2808 E1000_TXDCTL_FULL_TX_DESC_WB; 2809 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 2810 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 2811 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); 2812 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); 2813 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 2814 E1000_TXDCTL_FULL_TX_DESC_WB; 2815 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 2816 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 2817 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); 2818 2819 /* 2820 * ICH8 has opposite polarity of no_snoop bits. 2821 * By default, we should use snoop behavior. 
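 * (so ICH8 is given PCIE_ICH8_SNOOP_ALL while later parts are given the
 * inverse of PCIE_NO_SNOOP_ALL)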
2822 */ 2823 if (mac->type == e1000_ich8lan) 2824 snoop = PCIE_ICH8_SNOOP_ALL; 2825 else 2826 snoop = (u32) ~(PCIE_NO_SNOOP_ALL); 2827 e1000_set_pcie_no_snoop_generic(hw, snoop); 2828 2829 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2830 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 2831 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 2832 2833 /* 2834 * Clear all of the statistics registers (clear on read). It is 2835 * important that we do this after we have tried to establish link 2836 * because the symbol error count will increment wildly if there 2837 * is no link. 2838 */ 2839 e1000_clear_hw_cntrs_ich8lan(hw); 2840 2841 return ret_val; 2842 } 2843 /** 2844 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits 2845 * @hw: pointer to the HW structure 2846 * 2847 * Sets/Clears required hardware bits necessary for correctly setting up the 2848 * hardware for transmit and receive. 2849 **/ 2850 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) 2851 { 2852 u32 reg; 2853 2854 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); 2855 2856 /* Extended Device Control */ 2857 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2858 reg |= (1 << 22); 2859 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 2860 if (hw->mac.type >= e1000_pchlan) 2861 reg |= E1000_CTRL_EXT_PHYPDEN; 2862 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2863 2864 /* Transmit Descriptor Control 0 */ 2865 reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); 2866 reg |= (1 << 22); 2867 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); 2868 2869 /* Transmit Descriptor Control 1 */ 2870 reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); 2871 reg |= (1 << 22); 2872 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); 2873 2874 /* Transmit Arbitration Control 0 */ 2875 reg = E1000_READ_REG(hw, E1000_TARC(0)); 2876 if (hw->mac.type == e1000_ich8lan) 2877 reg |= (1 << 28) | (1 << 29); 2878 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 2879 E1000_WRITE_REG(hw, E1000_TARC(0), reg); 2880 2881 /* Transmit Arbitration Control 1 */ 2882 reg = E1000_READ_REG(hw, E1000_TARC(1)); 2883 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) 2884 reg &= ~(1 << 28); 2885 else 2886 reg |= (1 << 28); 2887 reg |= (1 << 24) | (1 << 26) | (1 << 30); 2888 E1000_WRITE_REG(hw, E1000_TARC(1), reg); 2889 2890 /* Device Status */ 2891 if (hw->mac.type == e1000_ich8lan) { 2892 reg = E1000_READ_REG(hw, E1000_STATUS); 2893 reg &= ~(1 << 31); 2894 E1000_WRITE_REG(hw, E1000_STATUS, reg); 2895 } 2896 2897 /* 2898 * work-around descriptor data corruption issue during nfs v2 udp 2899 * traffic, just disable the nfs filtering capability 2900 */ 2901 reg = E1000_READ_REG(hw, E1000_RFCTL); 2902 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 2903 E1000_WRITE_REG(hw, E1000_RFCTL, reg); 2904 2905 return; 2906 } 2907 2908 /** 2909 * e1000_setup_link_ich8lan - Setup flow control and link settings 2910 * @hw: pointer to the HW structure 2911 * 2912 * Determines which flow control settings to use, then configures flow 2913 * control. Calls the appropriate media-specific link configuration 2914 * function. Assuming the adapter has a valid link partner, a valid link 2915 * should be established. Assumes the hardware has previously been reset 2916 * and the transmitter and receiver are not enabled. 
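 *
 * For 82577/82578 PHYs the requested pause time is additionally written to
 * PHY_REG(BM_PORT_CTRL_PAGE, 27) after the MAC FCTTV register is programmed.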
2917 **/ 2918 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) 2919 { 2920 s32 ret_val = E1000_SUCCESS; 2921 2922 DEBUGFUNC("e1000_setup_link_ich8lan"); 2923 2924 if (hw->phy.ops.check_reset_block(hw)) 2925 goto out; 2926 2927 /* 2928 * ICH parts do not have a word in the NVM to determine 2929 * the default flow control setting, so we explicitly 2930 * set it to full. 2931 */ 2932 if (hw->fc.requested_mode == e1000_fc_default) 2933 hw->fc.requested_mode = e1000_fc_full; 2934 2935 /* 2936 * Save off the requested flow control mode for use later. Depending 2937 * on the link partner's capabilities, we may or may not use this mode. 2938 */ 2939 hw->fc.current_mode = hw->fc.requested_mode; 2940 2941 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", 2942 hw->fc.current_mode); 2943 2944 /* Continue to configure the copper link. */ 2945 ret_val = hw->mac.ops.setup_physical_interface(hw); 2946 if (ret_val) 2947 goto out; 2948 2949 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); 2950 if ((hw->phy.type == e1000_phy_82578) || 2951 (hw->phy.type == e1000_phy_82577)) { 2952 ret_val = hw->phy.ops.write_reg(hw, 2953 PHY_REG(BM_PORT_CTRL_PAGE, 27), 2954 hw->fc.pause_time); 2955 if (ret_val) 2956 goto out; 2957 } 2958 2959 ret_val = e1000_set_fc_watermarks_generic(hw); 2960 2961 out: 2962 return ret_val; 2963 } 2964 2965 /** 2966 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface 2967 * @hw: pointer to the HW structure 2968 * 2969 * Configures the kumeran interface to the PHY to wait the appropriate time 2970 * when polling the PHY, then call the generic setup_copper_link to finish 2971 * configuring the copper link. 2972 **/ 2973 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) 2974 { 2975 u32 ctrl; 2976 s32 ret_val; 2977 u16 reg_data; 2978 2979 DEBUGFUNC("e1000_setup_copper_link_ich8lan"); 2980 2981 ctrl = E1000_READ_REG(hw, E1000_CTRL); 2982 ctrl |= E1000_CTRL_SLU; 2983 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 2984 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 2985 2986 /* 2987 * Set the mac to wait the maximum time between each iteration 2988 * and increase the max iterations when polling the phy; 2989 * this fixes erroneous timeouts at 10Mbps. 
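 * (the 0xFFFF written to E1000_KMRNCTRLSTA_TIMEOUTS below selects the
 * largest supported wait time and iteration count)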
2990 */ 2991 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 2992 0xFFFF); 2993 if (ret_val) 2994 goto out; 2995 ret_val = e1000_read_kmrn_reg_generic(hw, 2996 E1000_KMRNCTRLSTA_INBAND_PARAM, 2997 ®_data); 2998 if (ret_val) 2999 goto out; 3000 reg_data |= 0x3F; 3001 ret_val = e1000_write_kmrn_reg_generic(hw, 3002 E1000_KMRNCTRLSTA_INBAND_PARAM, 3003 reg_data); 3004 if (ret_val) 3005 goto out; 3006 3007 switch (hw->phy.type) { 3008 case e1000_phy_igp_3: 3009 ret_val = e1000_copper_link_setup_igp(hw); 3010 if (ret_val) 3011 goto out; 3012 break; 3013 case e1000_phy_bm: 3014 case e1000_phy_82578: 3015 ret_val = e1000_copper_link_setup_m88(hw); 3016 if (ret_val) 3017 goto out; 3018 break; 3019 case e1000_phy_82577: 3020 ret_val = e1000_copper_link_setup_82577(hw); 3021 if (ret_val) 3022 goto out; 3023 break; 3024 case e1000_phy_ife: 3025 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, 3026 ®_data); 3027 if (ret_val) 3028 goto out; 3029 3030 reg_data &= ~IFE_PMC_AUTO_MDIX; 3031 3032 switch (hw->phy.mdix) { 3033 case 1: 3034 reg_data &= ~IFE_PMC_FORCE_MDIX; 3035 break; 3036 case 2: 3037 reg_data |= IFE_PMC_FORCE_MDIX; 3038 break; 3039 case 0: 3040 default: 3041 reg_data |= IFE_PMC_AUTO_MDIX; 3042 break; 3043 } 3044 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, 3045 reg_data); 3046 if (ret_val) 3047 goto out; 3048 break; 3049 default: 3050 break; 3051 } 3052 ret_val = e1000_setup_copper_link_generic(hw); 3053 3054 out: 3055 return ret_val; 3056 } 3057 3058 /** 3059 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex 3060 * @hw: pointer to the HW structure 3061 * @speed: pointer to store current link speed 3062 * @duplex: pointer to store the current link duplex 3063 * 3064 * Calls the generic get_speed_and_duplex to retrieve the current link 3065 * information and then calls the Kumeran lock loss workaround for links at 3066 * gigabit speeds. 3067 **/ 3068 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, 3069 u16 *duplex) 3070 { 3071 s32 ret_val; 3072 3073 DEBUGFUNC("e1000_get_link_up_info_ich8lan"); 3074 3075 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); 3076 if (ret_val) 3077 goto out; 3078 3079 if ((hw->mac.type == e1000_ich8lan) && 3080 (hw->phy.type == e1000_phy_igp_3) && 3081 (*speed == SPEED_1000)) { 3082 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); 3083 } 3084 3085 out: 3086 return ret_val; 3087 } 3088 3089 /** 3090 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround 3091 * @hw: pointer to the HW structure 3092 * 3093 * Work-around for 82566 Kumeran PCS lock loss: 3094 * On link status change (i.e. PCI reset, speed change) and link is up and 3095 * speed is gigabit- 3096 * 0) if workaround is optionally disabled do nothing 3097 * 1) wait 1ms for Kumeran link to come up 3098 * 2) check Kumeran Diagnostic register PCS lock loss bit 3099 * 3) if not set the link is locked (all is good), otherwise... 3100 * 4) reset the PHY 3101 * 5) repeat up to 10 times 3102 * Note: this is only called for IGP3 copper when speed is 1gb. 
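 *
 * If PCS lock is still lost after the 10 attempts, gigabit operation is
 * disabled via E1000_PHY_CTRL, the gig downshift workaround is invoked, and
 * -E1000_ERR_PHY is returned.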
3103 **/ 3104 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) 3105 { 3106 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3107 u32 phy_ctrl; 3108 s32 ret_val = E1000_SUCCESS; 3109 u16 i, data; 3110 bool link; 3111 3112 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan"); 3113 3114 if (!(dev_spec->kmrn_lock_loss_workaround_enabled)) 3115 goto out; 3116 3117 /* 3118 * Make sure link is up before proceeding. If not just return. 3119 * Attempting this while link is negotiating fouled up link 3120 * stability 3121 */ 3122 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); 3123 if (!link) { 3124 ret_val = E1000_SUCCESS; 3125 goto out; 3126 } 3127 3128 for (i = 0; i < 10; i++) { 3129 /* read once to clear */ 3130 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); 3131 if (ret_val) 3132 goto out; 3133 /* and again to get new status */ 3134 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); 3135 if (ret_val) 3136 goto out; 3137 3138 /* check for PCS lock */ 3139 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) { 3140 ret_val = E1000_SUCCESS; 3141 goto out; 3142 } 3143 3144 /* Issue PHY reset */ 3145 hw->phy.ops.reset(hw); 3146 msec_delay_irq(5); 3147 } 3148 /* Disable GigE link negotiation */ 3149 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 3150 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | 3151 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 3152 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 3153 3154 /* 3155 * Call gig speed drop workaround on Gig disable before accessing 3156 * any PHY registers 3157 */ 3158 e1000_gig_downshift_workaround_ich8lan(hw); 3159 3160 /* unable to acquire PCS lock */ 3161 ret_val = -E1000_ERR_PHY; 3162 3163 out: 3164 return ret_val; 3165 } 3166 3167 /** 3168 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state 3169 * @hw: pointer to the HW structure 3170 * @state: boolean value used to set the current Kumeran workaround state 3171 * 3172 * If ICH8, set the current Kumeran workaround state (enabled - TRUE 3173 * /disabled - FALSE). 
3174 **/ 3175 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, 3176 bool state) 3177 { 3178 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3179 3180 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan"); 3181 3182 if (hw->mac.type != e1000_ich8lan) { 3183 DEBUGOUT("Workaround applies to ICH8 only.\n"); 3184 return; 3185 } 3186 3187 dev_spec->kmrn_lock_loss_workaround_enabled = state; 3188 3189 return; 3190 } 3191 3192 /** 3193 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 3194 * @hw: pointer to the HW structure 3195 * 3196 * Workaround for 82566 power-down on D3 entry: 3197 * 1) disable gigabit link 3198 * 2) write VR power-down enable 3199 * 3) read it back 3200 * Continue if successful, else issue LCD reset and repeat 3201 **/ 3202 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) 3203 { 3204 u32 reg; 3205 u16 data; 3206 u8 retry = 0; 3207 3208 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan"); 3209 3210 if (hw->phy.type != e1000_phy_igp_3) 3211 goto out; 3212 3213 /* Try the workaround twice (if needed) */ 3214 do { 3215 /* Disable link */ 3216 reg = E1000_READ_REG(hw, E1000_PHY_CTRL); 3217 reg |= (E1000_PHY_CTRL_GBE_DISABLE | 3218 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 3219 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg); 3220 3221 /* 3222 * Call gig speed drop workaround on Gig disable before 3223 * accessing any PHY registers 3224 */ 3225 if (hw->mac.type == e1000_ich8lan) 3226 e1000_gig_downshift_workaround_ich8lan(hw); 3227 3228 /* Write VR power-down enable */ 3229 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); 3230 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 3231 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL, 3232 data | IGP3_VR_CTRL_MODE_SHUTDOWN); 3233 3234 /* Read it back and test */ 3235 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); 3236 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 3237 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) 3238 break; 3239 3240 /* Issue PHY reset and repeat at most one more time */ 3241 reg = E1000_READ_REG(hw, E1000_CTRL); 3242 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST); 3243 retry++; 3244 } while (retry); 3245 3246 out: 3247 return; 3248 } 3249 3250 /** 3251 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working 3252 * @hw: pointer to the HW structure 3253 * 3254 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), 3255 * LPLU, Gig disable, MDIC PHY reset): 3256 * 1) Set Kumeran Near-end loopback 3257 * 2) Clear Kumeran Near-end loopback 3258 * Should only be called for ICH8[m] devices with IGP_3 Phy. 
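 *
 * The loopback bit is pulsed with two Kumeran writes: it is first set in
 * E1000_KMRNCTRLSTA_DIAG_OFFSET and then cleared again in the same register.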
3259 **/ 3260 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) 3261 { 3262 s32 ret_val = E1000_SUCCESS; 3263 u16 reg_data; 3264 3265 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan"); 3266 3267 if ((hw->mac.type != e1000_ich8lan) || 3268 (hw->phy.type != e1000_phy_igp_3)) 3269 goto out; 3270 3271 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 3272 ®_data); 3273 if (ret_val) 3274 goto out; 3275 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; 3276 ret_val = e1000_write_kmrn_reg_generic(hw, 3277 E1000_KMRNCTRLSTA_DIAG_OFFSET, 3278 reg_data); 3279 if (ret_val) 3280 goto out; 3281 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; 3282 ret_val = e1000_write_kmrn_reg_generic(hw, 3283 E1000_KMRNCTRLSTA_DIAG_OFFSET, 3284 reg_data); 3285 out: 3286 return; 3287 } 3288 3289 /** 3290 * e1000_disable_gig_wol_ich8lan - disable gig during WoL 3291 * @hw: pointer to the HW structure 3292 * 3293 * During S0 to Sx transition, it is possible the link remains at gig 3294 * instead of negotiating to a lower speed. Before going to Sx, set 3295 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation 3296 * to a lower speed. 3297 * 3298 * Should only be called for applicable parts. 3299 **/ 3300 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3301 { 3302 u32 phy_ctrl; 3303 3304 switch (hw->mac.type) { 3305 case e1000_ich8lan: 3306 case e1000_ich9lan: 3307 case e1000_ich10lan: 3308 case e1000_pchlan: 3309 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 3310 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | 3311 E1000_PHY_CTRL_GBE_DISABLE; 3312 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 3313 3314 if (hw->mac.type == e1000_pchlan) 3315 e1000_phy_hw_reset_ich8lan(hw); 3316 default: 3317 break; 3318 } 3319 3320 return; 3321 } 3322 3323 /** 3324 * e1000_cleanup_led_ich8lan - Restore the default LED operation 3325 * @hw: pointer to the HW structure 3326 * 3327 * Return the LED back to the default configuration. 3328 **/ 3329 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) 3330 { 3331 DEBUGFUNC("e1000_cleanup_led_ich8lan"); 3332 3333 if (hw->phy.type == e1000_phy_ife) 3334 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 3335 0); 3336 3337 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); 3338 return E1000_SUCCESS; 3339 } 3340 3341 /** 3342 * e1000_led_on_ich8lan - Turn LEDs on 3343 * @hw: pointer to the HW structure 3344 * 3345 * Turn on the LEDs. 3346 **/ 3347 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) 3348 { 3349 DEBUGFUNC("e1000_led_on_ich8lan"); 3350 3351 if (hw->phy.type == e1000_phy_ife) 3352 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 3353 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); 3354 3355 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); 3356 return E1000_SUCCESS; 3357 } 3358 3359 /** 3360 * e1000_led_off_ich8lan - Turn LEDs off 3361 * @hw: pointer to the HW structure 3362 * 3363 * Turn off the LEDs. 3364 **/ 3365 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) 3366 { 3367 DEBUGFUNC("e1000_led_off_ich8lan"); 3368 3369 if (hw->phy.type == e1000_phy_ife) 3370 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 3371 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); 3372 3373 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); 3374 return E1000_SUCCESS; 3375 } 3376 3377 /** 3378 * e1000_setup_led_pchlan - Configures SW controllable LED 3379 * @hw: pointer to the HW structure 3380 * 3381 * This prepares the SW controllable LED for use. 
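 *
 * PCH LEDs are driven from the PHY's HV_LED_CONFIG register, so this simply
 * writes ledctl_mode1 there.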
3382 **/ 3383 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) 3384 { 3385 DEBUGFUNC("e1000_setup_led_pchlan"); 3386 3387 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, 3388 (u16)hw->mac.ledctl_mode1); 3389 } 3390 3391 /** 3392 * e1000_cleanup_led_pchlan - Restore the default LED operation 3393 * @hw: pointer to the HW structure 3394 * 3395 * Return the LED back to the default configuration. 3396 **/ 3397 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) 3398 { 3399 DEBUGFUNC("e1000_cleanup_led_pchlan"); 3400 3401 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, 3402 (u16)hw->mac.ledctl_default); 3403 } 3404 3405 /** 3406 * e1000_led_on_pchlan - Turn LEDs on 3407 * @hw: pointer to the HW structure 3408 * 3409 * Turn on the LEDs. 3410 **/ 3411 static s32 e1000_led_on_pchlan(struct e1000_hw *hw) 3412 { 3413 u16 data = (u16)hw->mac.ledctl_mode2; 3414 u32 i, led; 3415 3416 DEBUGFUNC("e1000_led_on_pchlan"); 3417 3418 /* 3419 * If no link, then turn LED on by setting the invert bit 3420 * for each LED that's mode is "link_up" in ledctl_mode2. 3421 */ 3422 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 3423 for (i = 0; i < 3; i++) { 3424 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; 3425 if ((led & E1000_PHY_LED0_MODE_MASK) != 3426 E1000_LEDCTL_MODE_LINK_UP) 3427 continue; 3428 if (led & E1000_PHY_LED0_IVRT) 3429 data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); 3430 else 3431 data |= (E1000_PHY_LED0_IVRT << (i * 5)); 3432 } 3433 } 3434 3435 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); 3436 } 3437 3438 /** 3439 * e1000_led_off_pchlan - Turn LEDs off 3440 * @hw: pointer to the HW structure 3441 * 3442 * Turn off the LEDs. 3443 **/ 3444 static s32 e1000_led_off_pchlan(struct e1000_hw *hw) 3445 { 3446 u16 data = (u16)hw->mac.ledctl_mode1; 3447 u32 i, led; 3448 3449 DEBUGFUNC("e1000_led_off_pchlan"); 3450 3451 /* 3452 * If no link, then turn LED off by clearing the invert bit 3453 * for each LED that's mode is "link_up" in ledctl_mode1. 3454 */ 3455 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 3456 for (i = 0; i < 3; i++) { 3457 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; 3458 if ((led & E1000_PHY_LED0_MODE_MASK) != 3459 E1000_LEDCTL_MODE_LINK_UP) 3460 continue; 3461 if (led & E1000_PHY_LED0_IVRT) 3462 data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); 3463 else 3464 data |= (E1000_PHY_LED0_IVRT << (i * 5)); 3465 } 3466 } 3467 3468 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); 3469 } 3470 3471 /** 3472 * e1000_get_cfg_done_ich8lan - Read config done bit 3473 * @hw: pointer to the HW structure 3474 * 3475 * Read the management control register for the config done bit for 3476 * completion status. NOTE: silicon which is EEPROM-less will fail trying 3477 * to read the config done bit, so an error is *ONLY* logged and returns 3478 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon 3479 * would not be able to be reset or change link. 
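 *
 * Flow (a summary of the code below): on PCH and later, clear the PHY Reset
 * Asserted (PHYRA) status bit if it is set; wait for the generic config done
 * indication; then, for EEPROM-less ICH8/ICH9 parts with an IGP3 PHY, run
 * the PHY init script, while newer parts report a config error when no valid
 * NVM bank can be found.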
3480 **/ 3481 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) 3482 { 3483 s32 ret_val = E1000_SUCCESS; 3484 u32 bank = 0; 3485 3486 if (hw->mac.type >= e1000_pchlan) { 3487 u32 status = E1000_READ_REG(hw, E1000_STATUS); 3488 3489 if (status & E1000_STATUS_PHYRA) 3490 E1000_WRITE_REG(hw, E1000_STATUS, status & 3491 ~E1000_STATUS_PHYRA); 3492 else 3493 DEBUGOUT("PHY Reset Asserted not set - needs delay\n"); 3494 } 3495 3496 e1000_get_cfg_done_generic(hw); 3497 3498 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 3499 if (hw->mac.type <= e1000_ich9lan) { 3500 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) && 3501 (hw->phy.type == e1000_phy_igp_3)) { 3502 e1000_phy_init_script_igp3(hw); 3503 } 3504 } else { 3505 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { 3506 /* Maybe we should do a basic PHY config */ 3507 DEBUGOUT("EEPROM not present\n"); 3508 ret_val = -E1000_ERR_CONFIG; 3509 } 3510 } 3511 3512 return ret_val; 3513 } 3514 3515 /** 3516 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down 3517 * @hw: pointer to the HW structure 3518 * 3519 * In the case of a PHY power down to save power, or to turn off link during a 3520 * driver unload, or wake on lan is not enabled, remove the link. 3521 **/ 3522 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) 3523 { 3524 /* If the management interface is not enabled, then power down */ 3525 if (!(hw->mac.ops.check_mng_mode(hw) || 3526 hw->phy.ops.check_reset_block(hw))) 3527 e1000_power_down_phy_copper(hw); 3528 3529 return; 3530 } 3531 3532 /** 3533 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters 3534 * @hw: pointer to the HW structure 3535 * 3536 * Clears hardware counters specific to the silicon family and calls 3537 * clear_hw_cntrs_generic to clear all general purpose counters. 3538 **/ 3539 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) 3540 { 3541 u16 phy_data; 3542 3543 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan"); 3544 3545 e1000_clear_hw_cntrs_base_generic(hw); 3546 3547 E1000_READ_REG(hw, E1000_ALGNERRC); 3548 E1000_READ_REG(hw, E1000_RXERRC); 3549 E1000_READ_REG(hw, E1000_TNCRS); 3550 E1000_READ_REG(hw, E1000_CEXTERR); 3551 E1000_READ_REG(hw, E1000_TSCTC); 3552 E1000_READ_REG(hw, E1000_TSCTFC); 3553 3554 E1000_READ_REG(hw, E1000_MGTPRC); 3555 E1000_READ_REG(hw, E1000_MGTPDC); 3556 E1000_READ_REG(hw, E1000_MGTPTC); 3557 3558 E1000_READ_REG(hw, E1000_IAC); 3559 E1000_READ_REG(hw, E1000_ICRXOC); 3560 3561 /* Clear PHY statistics registers */ 3562 if ((hw->phy.type == e1000_phy_82578) || 3563 (hw->phy.type == e1000_phy_82577)) { 3564 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); 3565 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); 3566 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); 3567 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); 3568 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); 3569 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); 3570 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); 3571 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); 3572 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); 3573 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); 3574 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); 3575 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); 3576 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); 3577 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); 3578 } 3579 } 3580 3581
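
/*
 * Usage sketch (illustrative only, not part of the shared code): callers
 * normally reach the static NVM helpers above through the generic
 * e1000_read_nvm() wrapper rather than calling them directly, e.g. to read
 * a single NVM word:
 *
 *	u16 word;
 *
 *	if (e1000_read_nvm(hw, 0x19, 1, &word) == E1000_SUCCESS)
 *		DEBUGOUT1("NVM word 0x19 = 0x%04x\n", word);
 */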