/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000_api.h"

static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
                                              u8 *mc_addr_list,
                                              u32 mc_addr_count);
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
                                           bool active);
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
                                           bool active);
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                  u16 words, u16 *data);
static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
                              u16 *data);
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                   u16 words, u16 *data);
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
                                           u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
                                          u16 *speed, u16 *duplex);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
                                         u32 offset, u8 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u8 size, u16 *data);
static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
                                           u32 *data);
static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u32 *data);
static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
                                            u32 offset, u32 data);
static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
                                                 u32 offset, u32 dword);
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
                                         u32 offset, u16 *data);
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
    struct ich8_hsfsts {
        u16 flcdone:1;    /* bit 0 Flash Cycle Done */
        u16 flcerr:1;     /* bit 1 Flash Cycle Error */
        u16 dael:1;       /* bit 2 Direct Access error Log */
        u16 berasesz:2;   /* bit 4:3 Sector Erase Size */
        u16 flcinprog:1;  /* bit 5 flash cycle in Progress */
        u16 reserved1:2;  /* bit 7:6 Reserved */
        u16 reserved2:6;  /* bit 13:8 Reserved */
        u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
        u16 flockdn:1;    /* bit 15 Flash Config Lock-Down */
    } hsf_status;
    u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
    struct ich8_hsflctl {
        u16 flcgo:1;     /* 0 Flash Cycle Go */
        u16 flcycle:2;   /* 2:1 Flash Cycle */
        u16 reserved:5;  /* 7:3 Reserved */
        u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
        u16 flockdn:6;   /* 15:10 Reserved */
    } hsf_ctrl;
    u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
    struct ich8_flracc {
        u32 grra:8;  /* 0:7 GbE region Read Access */
        u32 grwa:8;  /* 8:15 GbE region Write Access */
        u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
        u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
    } hsf_flregacc;
    u16 regval;
};

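/* Illustrative sketch (not part of the driver flow): the unions above let a
 * raw register read be decoded field-by-field.  Assuming the
 * ICH_FLASH_HSFSTS offset macro and the E1000_READ_FLASH_REG16() accessor
 * from this driver's flash helpers, a hardware-sequencing status check
 * looks like this:
 */
static inline bool e1000_example_flash_cycle_done(struct e1000_hw *hw)
{
    union ich8_hws_flash_status hsfsts;

    hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

    /* Done without a cycle error means the last flash cycle succeeded */
    return hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr;
}
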
/**
 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 * @hw: pointer to the HW structure
 *
 * Test access to the PHY registers by reading the PHY ID registers. If
 * the PHY ID is already known (e.g. resume path) compare it with known ID,
 * otherwise assume the read PHY ID is correct if it is valid.
 *
 * Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
    u16 phy_reg = 0;
    u32 phy_id = 0;
    s32 ret_val = 0;
    u16 retry_count;
    u32 mac_reg = 0;

    for (retry_count = 0; retry_count < 2; retry_count++) {
        ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
        if (ret_val || (phy_reg == 0xFFFF))
            continue;
        phy_id = (u32)(phy_reg << 16);

        ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
        if (ret_val || (phy_reg == 0xFFFF)) {
            phy_id = 0;
            continue;
        }
        phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
        break;
    }

    if (hw->phy.id) {
        if (hw->phy.id == phy_id)
            goto out;
    } else if (phy_id) {
        hw->phy.id = phy_id;
        hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
        goto out;
    }

    /* In case the PHY needs to be in mdio slow mode,
     * set slow mode and try to get the PHY id again.
     */
    if (hw->mac.type < e1000_pch_lpt) {
        hw->phy.ops.release(hw);
        ret_val = e1000_set_mdio_slow_mode_hv(hw);
        if (!ret_val)
            ret_val = e1000_get_phy_id(hw);
        hw->phy.ops.acquire(hw);
    }

    if (ret_val)
        return FALSE;
out:
    if (hw->mac.type >= e1000_pch_lpt) {
        /* Only unforce SMBus if ME is not active */
        if (!(E1000_READ_REG(hw, E1000_FWSM) &
            E1000_ICH_FWSM_FW_VALID)) {
            /* Unforce SMBus mode in PHY */
            hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
            phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
            hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

            /* Unforce SMBus mode in MAC */
            mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
            mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
            E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
        }
    }

    return TRUE;
}

/**
 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 * @hw: pointer to the HW structure
 *
 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 * used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
    u32 mac_reg;

    DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

    /* Set Phy Config Counter to 50msec */
    mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
    mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
    mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
    E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

    /* Toggle LANPHYPC Value bit */
    mac_reg = E1000_READ_REG(hw, E1000_CTRL);
    mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
    mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
    E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
    E1000_WRITE_FLUSH(hw);
    msec_delay(1);
    mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
    E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
    E1000_WRITE_FLUSH(hw);

    if (hw->mac.type < e1000_pch_lpt) {
        msec_delay(50);
    } else {
        u16 count = 20;

        do {
            msec_delay(5);
        } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
                 E1000_CTRL_EXT_LPCD) && count--);

        msec_delay(30);
    }
}

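/* Illustrative sketch: the PHY id tested by e1000_phy_is_accessible_pchlan()
 * above is assembled from the two MII id registers; PHY_ID1 supplies the
 * high word and PHY_ID2 the low word, with the revision bits masked off.
 * A hypothetical restatement of that composition:
 */
static inline u32 e1000_example_compose_phy_id(u16 id1, u16 id2)
{
    return ((u32)id1 << 16) | (u32)(id2 & PHY_REVISION_MASK);
}
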
/**
 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 * @hw: pointer to the HW structure
 *
 * Workarounds/flow necessary for PHY initialization during driver load
 * and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
    u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
    s32 ret_val;

    DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

    /* Gate automatic PHY configuration by hardware on managed and
     * non-managed 82579 and newer adapters.
     */
    e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

    /* It is not possible to be certain of the current state of ULP
     * so forcibly disable it.
     */
    hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
    e1000_disable_ulp_lpt_lp(hw, TRUE);

    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val) {
        DEBUGOUT("Failed to initialize PHY flow\n");
        goto out;
    }

    /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
     * inaccessible and resetting the PHY is not blocked, toggle the
     * LANPHYPC Value bit to force the interconnect to PCIe mode.
     */
    switch (hw->mac.type) {
    case e1000_pch_lpt:
    case e1000_pch_spt:
    case e1000_pch_cnp:
    case e1000_pch_tgp:
    case e1000_pch_adp:
    case e1000_pch_mtp:
        if (e1000_phy_is_accessible_pchlan(hw))
            break;

        /* Before toggling LANPHYPC, see if PHY is accessible by
         * forcing MAC to SMBus mode first.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

        /* Wait 50 milliseconds for MAC to finish any retries
         * that it might be trying to perform from previous
         * attempts to acknowledge any phy read requests.
         */
        msec_delay(50);

        /* fall-through */
    case e1000_pch2lan:
        if (e1000_phy_is_accessible_pchlan(hw))
            break;

        /* fall-through */
    case e1000_pchlan:
        if ((hw->mac.type == e1000_pchlan) &&
            (fwsm & E1000_ICH_FWSM_FW_VALID))
            break;

        if (hw->phy.ops.check_reset_block(hw)) {
            DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
            ret_val = -E1000_ERR_PHY;
            break;
        }

        /* Toggle LANPHYPC Value bit */
        e1000_toggle_lanphypc_pch_lpt(hw);
        if (hw->mac.type >= e1000_pch_lpt) {
            if (e1000_phy_is_accessible_pchlan(hw))
                break;

            /* Toggling LANPHYPC brings the PHY out of SMBus mode
             * so ensure that the MAC is also out of SMBus mode
             */
            mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
            mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
            E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

            if (e1000_phy_is_accessible_pchlan(hw))
                break;

            ret_val = -E1000_ERR_PHY;
        }
        break;
    default:
        break;
    }

    hw->phy.ops.release(hw);
    if (!ret_val) {

        /* Check to see if able to reset PHY. Print error if not */
        if (hw->phy.ops.check_reset_block(hw)) {
            ERROR_REPORT("Reset blocked by ME\n");
            goto out;
        }

        /* Reset the PHY before any access to it. Doing so ensures
         * that the PHY is in a known good state before we read/write
         * PHY registers. The generic reset is sufficient here,
         * because we haven't determined the PHY type yet.
         */
        ret_val = e1000_phy_hw_reset_generic(hw);
        if (ret_val)
            goto out;

        /* On a successful reset, possibly need to wait for the PHY
         * to quiesce to an accessible state before returning control
         * to the calling function. If the PHY does not quiesce, then
         * return E1000E_BLK_PHY_RESET, as this is the condition that
         * the PHY is in.
         */
        ret_val = hw->phy.ops.check_reset_block(hw);
        if (ret_val)
            ERROR_REPORT("ME blocked access to PHY after reset\n");
    }

out:
    /* Ungate automatic PHY configuration on non-managed 82579 */
    if ((hw->mac.type == e1000_pch2lan) &&
        !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
        msec_delay(10);
        e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
    }

    return ret_val;
}

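/* Illustrative sketch: every *_locked accessor used in the workaround flow
 * above assumes the SW/FW/HW semaphore is already held.  A hypothetical
 * caller outside that flow would bracket the access itself:
 */
static inline s32 e1000_example_locked_phy_read(struct e1000_hw *hw,
                                                u32 offset, u16 *data)
{
    s32 ret_val = hw->phy.ops.acquire(hw);

    if (ret_val)
        return ret_val;
    ret_val = hw->phy.ops.read_reg_locked(hw, offset, data);
    hw->phy.ops.release(hw);

    return ret_val;
}
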
/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
    struct e1000_phy_info *phy = &hw->phy;
    s32 ret_val;

    DEBUGFUNC("e1000_init_phy_params_pchlan");

    phy->addr = 1;
    phy->reset_delay_us = 100;

    phy->ops.acquire = e1000_acquire_swflag_ich8lan;
    phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
    phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
    phy->ops.set_page = e1000_set_page_igp;
    phy->ops.read_reg = e1000_read_phy_reg_hv;
    phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
    phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
    phy->ops.release = e1000_release_swflag_ich8lan;
    phy->ops.reset = e1000_phy_hw_reset_ich8lan;
    phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
    phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
    phy->ops.write_reg = e1000_write_phy_reg_hv;
    phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
    phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
    phy->ops.power_up = e1000_power_up_phy_copper;
    phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
    phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

    phy->id = e1000_phy_unknown;

    ret_val = e1000_init_phy_workarounds_pchlan(hw);
    if (ret_val)
        return ret_val;

    if (phy->id == e1000_phy_unknown)
        switch (hw->mac.type) {
        default:
            ret_val = e1000_get_phy_id(hw);
            if (ret_val)
                return ret_val;
            if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
                break;
            /* fall-through */
        case e1000_pch2lan:
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
        case e1000_pch_tgp:
        case e1000_pch_adp:
        case e1000_pch_mtp:
            /* In case the PHY needs to be in mdio slow mode,
             * set slow mode and try to get the PHY id again.
             */
            ret_val = e1000_set_mdio_slow_mode_hv(hw);
            if (ret_val)
                return ret_val;
            ret_val = e1000_get_phy_id(hw);
            if (ret_val)
                return ret_val;
            break;
        }
    phy->type = e1000_get_phy_type_from_id(phy->id);

    switch (phy->type) {
    case e1000_phy_82577:
    case e1000_phy_82579:
    case e1000_phy_i217:
        phy->ops.check_polarity = e1000_check_polarity_82577;
        phy->ops.force_speed_duplex =
            e1000_phy_force_speed_duplex_82577;
        phy->ops.get_cable_length = e1000_get_cable_length_82577;
        phy->ops.get_info = e1000_get_phy_info_82577;
        phy->ops.commit = e1000_phy_sw_reset_generic;
        break;
    case e1000_phy_82578:
        phy->ops.check_polarity = e1000_check_polarity_m88;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
        phy->ops.get_cable_length = e1000_get_cable_length_m88;
        phy->ops.get_info = e1000_get_phy_info_m88;
        break;
    default:
        ret_val = -E1000_ERR_PHY;
        break;
    }

    return ret_val;
}

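/* Illustrative sketch: once e1000_init_phy_params_pchlan() has filled in the
 * ops table, family-agnostic code reaches the PCH-specific accessors purely
 * through the function pointers; this hypothetical helper resolves to
 * e1000_read_phy_reg_hv() on PCH parts without naming it:
 */
static inline s32 e1000_example_dispatch_phy_read(struct e1000_hw *hw,
                                                  u32 offset, u16 *data)
{
    return hw->phy.ops.read_reg(hw, offset, data);
}
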
/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_phy_info *phy = &hw->phy;
    s32 ret_val;
    u16 i = 0;

    DEBUGFUNC("e1000_init_phy_params_ich8lan");

    phy->addr = 1;
    phy->reset_delay_us = 100;

    phy->ops.acquire = e1000_acquire_swflag_ich8lan;
    phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
    phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
    phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
    phy->ops.read_reg = e1000_read_phy_reg_igp;
    phy->ops.release = e1000_release_swflag_ich8lan;
    phy->ops.reset = e1000_phy_hw_reset_ich8lan;
    phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
    phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
    phy->ops.write_reg = e1000_write_phy_reg_igp;
    phy->ops.power_up = e1000_power_up_phy_copper;
    phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

    /* We may need to do this twice - once for IGP and if that fails,
     * we'll set BM func pointers and try again
     */
    ret_val = e1000_determine_phy_address(hw);
    if (ret_val) {
        phy->ops.write_reg = e1000_write_phy_reg_bm;
        phy->ops.read_reg = e1000_read_phy_reg_bm;
        ret_val = e1000_determine_phy_address(hw);
        if (ret_val) {
            DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
            return ret_val;
        }
    }

    phy->id = 0;
    while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
           (i++ < 100)) {
        msec_delay(1);
        ret_val = e1000_get_phy_id(hw);
        if (ret_val)
            return ret_val;
    }

    /* Verify phy id */
    switch (phy->id) {
    case IGP03E1000_E_PHY_ID:
        phy->type = e1000_phy_igp_3;
        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
        phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
        phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
        phy->ops.get_info = e1000_get_phy_info_igp;
        phy->ops.check_polarity = e1000_check_polarity_igp;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
        break;
    case IFE_E_PHY_ID:
    case IFE_PLUS_E_PHY_ID:
    case IFE_C_E_PHY_ID:
        phy->type = e1000_phy_ife;
        phy->autoneg_mask = E1000_ALL_NOT_GIG;
        phy->ops.get_info = e1000_get_phy_info_ife;
        phy->ops.check_polarity = e1000_check_polarity_ife;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
        break;
    case BME1000_E_PHY_ID:
        phy->type = e1000_phy_bm;
        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
        phy->ops.read_reg = e1000_read_phy_reg_bm;
        phy->ops.write_reg = e1000_write_phy_reg_bm;
        phy->ops.commit = e1000_phy_sw_reset_generic;
        phy->ops.get_info = e1000_get_phy_info_m88;
        phy->ops.check_polarity = e1000_check_polarity_m88;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
        break;
    default:
        return -E1000_ERR_PHY;
        break;
    }

    return E1000_SUCCESS;
}

/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function
 * pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_nvm_info *nvm = &hw->nvm;
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    u32 gfpreg, sector_base_addr, sector_end_addr;
    u16 i;
    u32 nvm_size;

    DEBUGFUNC("e1000_init_nvm_params_ich8lan");

    nvm->type = e1000_nvm_flash_sw;

    if (hw->mac.type >= e1000_pch_spt) {
        /* in SPT, gfpreg doesn't exist. NVM size is taken from the
         * STRAP register. This is because in SPT the GbE Flash region
         * is no longer accessed through the flash registers. Instead,
         * the mechanism has changed, and the Flash region access
         * registers are now implemented in GbE memory space.
         */
        nvm->flash_base_addr = 0;
        nvm_size = (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
                   * NVM_SIZE_MULTIPLIER;
        nvm->flash_bank_size = nvm_size / 2;
        /* Adjust to word count */
        nvm->flash_bank_size /= sizeof(u16);
        /* Set the base address for flash register access */
        hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
    } else {
        /* Can't read flash registers if register set isn't mapped. */
        if (!hw->flash_address) {
            DEBUGOUT("ERROR: Flash registers not mapped\n");
            return -E1000_ERR_CONFIG;
        }

        gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

        /* sector_X_addr is a "sector"-aligned address (4096 bytes)
         * Add 1 to sector_end_addr since this sector is included in
         * the overall size.
         */
        sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
        sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

        /* flash_base_addr is byte-aligned */
        nvm->flash_base_addr = sector_base_addr
                               << FLASH_SECTOR_ADDR_SHIFT;

        /* find total size of the NVM, then cut in half since the total
         * size represents two separate NVM banks.
         */
        nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
                                << FLASH_SECTOR_ADDR_SHIFT);
        nvm->flash_bank_size /= 2;
        /* Adjust to word count */
        nvm->flash_bank_size /= sizeof(u16);
    }

    nvm->word_size = E1000_SHADOW_RAM_WORDS;

    /* Clear shadow ram */
    for (i = 0; i < nvm->word_size; i++) {
        dev_spec->shadow_ram[i].modified = FALSE;
        dev_spec->shadow_ram[i].value = 0xFFFF;
    }

    /* Function Pointers */
    nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
    nvm->ops.release = e1000_release_nvm_ich8lan;
    if (hw->mac.type >= e1000_pch_spt) {
        nvm->ops.read = e1000_read_nvm_spt;
        nvm->ops.update = e1000_update_nvm_checksum_spt;
    } else {
        nvm->ops.read = e1000_read_nvm_ich8lan;
        nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
    }
    nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
    nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
    nvm->ops.write = e1000_write_nvm_ich8lan;

    return E1000_SUCCESS;
}

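/* Illustrative worked example of the SPT-and-later NVM sizing above,
 * assuming NVM_SIZE_MULTIPLIER is 4096 bytes as in this driver's headers:
 * a STRAP size field of 0x1F yields (0x1F + 1) * 4096 = 128 KB of flash,
 * which is halved into two banks and then converted to 16-bit words.
 * A hypothetical helper restating that arithmetic:
 */
static inline u32 e1000_example_spt_bank_words(u32 strap)
{
    u32 nvm_bytes = (((strap >> 1) & 0x1F) + 1) * NVM_SIZE_MULTIPLIER;

    return (nvm_bytes / 2) / sizeof(u16); /* two banks of 16-bit words */
}
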
/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific MAC parameters and function
 * pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_mac_info *mac = &hw->mac;
    u16 pci_cfg;

    DEBUGFUNC("e1000_init_mac_params_ich8lan");

    /* Set media type */
    hw->phy.media_type = e1000_media_type_copper;

    /* Set mta register count */
    mac->mta_reg_count = 32;
    /* Set rar entry count */
    mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
    if (mac->type == e1000_ich8lan)
        mac->rar_entry_count--;
    /* Set if part includes ASF firmware */
    mac->asf_firmware_present = TRUE;
    /* FWSM register */
    mac->has_fwsm = TRUE;
    /* ARC subsystem not supported */
    mac->arc_subsystem_valid = FALSE;
    /* Adaptive IFS supported */
    mac->adaptive_ifs = TRUE;

    /* Function pointers */

    /* bus type/speed/width */
    mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
    /* function id */
    mac->ops.set_lan_id = e1000_set_lan_id_single_port;
    /* reset */
    mac->ops.reset_hw = e1000_reset_hw_ich8lan;
    /* hw initialization */
    mac->ops.init_hw = e1000_init_hw_ich8lan;
    /* link setup */
    mac->ops.setup_link = e1000_setup_link_ich8lan;
    /* physical interface setup */
    mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
    /* check for link */
    mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
    /* link info */
    mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
    /* multicast address update */
    mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
    /* clear hardware counters */
    mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

    /* LED and other operations */
    switch (mac->type) {
    case e1000_ich8lan:
    case e1000_ich9lan:
    case e1000_ich10lan:
        /* check management mode */
        mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
        /* ID LED init */
        mac->ops.id_led_init = e1000_id_led_init_generic;
        /* blink LED */
        mac->ops.blink_led = e1000_blink_led_generic;
        /* setup LED */
        mac->ops.setup_led = e1000_setup_led_generic;
        /* cleanup LED */
        mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
        /* turn on/off LED */
        mac->ops.led_on = e1000_led_on_ich8lan;
        mac->ops.led_off = e1000_led_off_ich8lan;
        break;
    case e1000_pch2lan:
        mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
        mac->ops.rar_set = e1000_rar_set_pch2lan;
        /* fall-through */
    case e1000_pch_lpt:
    case e1000_pch_spt:
    case e1000_pch_cnp:
    case e1000_pch_tgp:
    case e1000_pch_adp:
    case e1000_pch_mtp:
        /* multicast address update for pch2 */
        mac->ops.update_mc_addr_list =
            e1000_update_mc_addr_list_pch2lan;
        /* fall-through */
    case e1000_pchlan:
        /* save PCH revision_id */
        e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
        /* SPT uses full byte for revision ID,
         * as opposed to previous generations
         */
        if (hw->mac.type >= e1000_pch_spt)
            hw->revision_id = (u8)(pci_cfg & 0x00FF);
        else
            hw->revision_id = (u8)(pci_cfg & 0x000F);
        /* check management mode */
        mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
        /* ID LED init */
        mac->ops.id_led_init = e1000_id_led_init_pchlan;
        /* setup LED */
        mac->ops.setup_led = e1000_setup_led_pchlan;
        /* cleanup LED */
        mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
        /* turn on/off LED */
        mac->ops.led_on = e1000_led_on_pchlan;
        mac->ops.led_off = e1000_led_off_pchlan;
        break;
    default:
        break;
    }

    if (mac->type >= e1000_pch_lpt) {
        mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
        mac->ops.rar_set = e1000_rar_set_pch_lpt;
        mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
        mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
    }

    /* Enable PCS Lock-loss workaround for ICH8 */
    if (mac->type == e1000_ich8lan)
        e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

    return E1000_SUCCESS;
}

/**
 * __e1000_access_emi_reg_locked - Read/write EMI register
 * @hw: pointer to the HW structure
 * @address: EMI address to program
 * @data: pointer to value to read/write from/to the EMI address
 * @read: boolean flag to indicate read or write
 *
 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
                                         u16 *data, bool read)
{
    s32 ret_val;

    DEBUGFUNC("__e1000_access_emi_reg_locked");

    ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
    if (ret_val)
        return ret_val;

    if (read)
        ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
                                              data);
    else
        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
                                               *data);

    return ret_val;
}

/**
 * e1000_read_emi_reg_locked - Read Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be read from the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
    DEBUGFUNC("e1000_read_emi_reg_locked");

    return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
}

/**
 * e1000_write_emi_reg_locked - Write Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be written to the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
    DEBUGFUNC("e1000_write_emi_reg_locked");

    return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
}

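/* Illustrative sketch: the EMI interface above is an address/data window
 * pair (I82579_EMI_ADDR / I82579_EMI_DATA), so updating an EMI register is
 * a read-modify-write through that window.  With the semaphore already
 * held, a hypothetical bit-set helper would look like:
 */
static inline s32 e1000_example_emi_set_bits(struct e1000_hw *hw, u16 addr,
                                             u16 bits)
{
    u16 val;
    s32 ret_val = e1000_read_emi_reg_locked(hw, addr, &val);

    if (ret_val)
        return ret_val;

    return e1000_write_emi_reg_locked(hw, addr, val | bits);
}
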
/**
 * e1000_set_eee_pchlan - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
 * the link and the EEE capabilities of the link partner. The LPI Control
 * register bits will remain set only if/when link is up.
 *
 * EEE LPI must not be asserted earlier than one second after link is up.
 * On 82579, EEE LPI should not be enabled until such time, otherwise there
 * can be link issues with some switches. Other devices can have EEE LPI
 * enabled immediately upon link up since they have a timer in hardware which
 * prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    s32 ret_val;
    u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

    DEBUGFUNC("e1000_set_eee_pchlan");

    switch (hw->phy.type) {
    case e1000_phy_82579:
        lpa = I82579_EEE_LP_ABILITY;
        pcs_status = I82579_EEE_PCS_STATUS;
        adv_addr = I82579_EEE_ADVERTISEMENT;
        break;
    case e1000_phy_i217:
        lpa = I217_EEE_LP_ABILITY;
        pcs_status = I217_EEE_PCS_STATUS;
        adv_addr = I217_EEE_ADVERTISEMENT;
        break;
    default:
        return E1000_SUCCESS;
    }

    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        return ret_val;

    ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
    if (ret_val)
        goto release;

    /* Clear bits that enable EEE in various speeds */
    lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

    /* Enable EEE if not disabled by user */
    if (!dev_spec->eee_disable) {
        /* Save off link partner's EEE ability */
        ret_val = e1000_read_emi_reg_locked(hw, lpa,
                                            &dev_spec->eee_lp_ability);
        if (ret_val)
            goto release;

        /* Read EEE advertisement */
        ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
        if (ret_val)
            goto release;

        /* Enable EEE only for speeds in which the link partner is
         * EEE capable and for which we advertise EEE.
         */
        if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
            lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

        if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
            hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
            if (data & NWAY_LPAR_100TX_FD_CAPS)
                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
            else
                /* EEE is not supported in 100Half, so ignore
                 * partner's EEE in 100 ability if full-duplex
                 * is not advertised.
                 */
                dev_spec->eee_lp_ability &=
                    ~I82579_EEE_100_SUPPORTED;
        }
    }

    if (hw->phy.type == e1000_phy_82579) {
        ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
                                            &data);
        if (ret_val)
            goto release;

        data &= ~I82579_LPI_100_PLL_SHUT;
        ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
                                             data);
    }

    /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
    ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
    if (ret_val)
        goto release;

    ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
    hw->phy.ops.release(hw);

    return ret_val;
}

/**
 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests. Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 * speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
    u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
    u32 status = E1000_READ_REG(hw, E1000_STATUS);
    s32 ret_val = E1000_SUCCESS;
    u16 reg;

    if (link && (status & E1000_STATUS_SPEED_1000)) {
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
            return ret_val;

        ret_val =
            e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
                                       &reg);
        if (ret_val)
            goto release;

        ret_val =
            e1000_write_kmrn_reg_locked(hw,
                                        E1000_KMRNCTRLSTA_K1_CONFIG,
                                        reg &
                                        ~E1000_KMRNCTRLSTA_K1_ENABLE);
        if (ret_val)
            goto release;

        usec_delay(10);

        E1000_WRITE_REG(hw, E1000_FEXTNVM6,
                        fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

        ret_val =
            e1000_write_kmrn_reg_locked(hw,
                                        E1000_KMRNCTRLSTA_K1_CONFIG,
                                        reg);
release:
        hw->phy.ops.release(hw);
    } else {
        /* clear FEXTNVM6 bit 8 on link down or 10/100 */
        fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

        if ((hw->phy.revision > 5) || !link ||
            ((status & E1000_STATUS_SPEED_100) &&
             (status & E1000_STATUS_FD)))
            goto update_fextnvm6;

        ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
        if (ret_val)
            return ret_val;

        /* Clear link status transmit timeout */
        reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

        if (status & E1000_STATUS_SPEED_100) {
            /* Set inband Tx timeout to 5x10us for 100Half */
            reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

            /* Do not extend the K1 entry latency for 100Half */
            fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
        } else {
            /* Set inband Tx timeout to 50x10us for 10Full/Half */
            reg |= 50 <<
                   I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

            /* Extend the K1 entry latency for 10 Mbps */
            fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
        }

        ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
        if (ret_val)
            return ret_val;

update_fextnvm6:
        E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
    }

    return ret_val;
}

/* Convert a raw LTR value/scale encoding into the latency it represents,
 * in nanoseconds.
 */
static u64 e1000_ltr2ns(u16 ltr)
{
    u32 value, scale;

    /* Determine the latency in nsec based on the LTR value & scale */
    value = ltr & E1000_LTRV_VALUE_MASK;
    scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

    return value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
}

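/* Illustrative worked example of the decode above, assuming the usual LTR
 * field widths (10-bit value, 3-bit scale, scale factor of 5): an encoding
 * with scale = 2 and value = 100 represents 100 * 2^(2 * 5) ns =
 * 100 * 1024 ns = 102400 ns, i.e. roughly 102 usec of tolerated latency.
 */
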
/**
 * e1000_platform_pm_pch_lpt - Set platform power management values
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 * when link is up (which must not exceed the maximum latency supported
 * by the platform), otherwise specify there is no LTR requirement.
 * Unlike true PCIe devices, which set the LTR maximum snoop/no-snoop
 * latencies in the LTR Extended Capability Structure in the PCIe Extended
 * Capability register set, on this device LTR is set by writing the
 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 * setting the SEND bit to send an Intel On-chip System Fabric sideband
 * (IOSF-SB) message to the PMC.
 *
 * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 * high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
    u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
              link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
    u16 lat_enc = 0; /* latency encoded */
    s32 obff_hwm = 0;

    DEBUGFUNC("e1000_platform_pm_pch_lpt");

    if (link) {
        u16 speed, duplex, scale = 0;
        u16 max_snoop, max_nosnoop;
        u16 max_ltr_enc; /* max LTR latency encoded */
        s64 lat_ns;
        s64 value;
        u32 rxa;

        if (!hw->mac.max_frame_size) {
            DEBUGOUT("max_frame_size not set.\n");
            return -E1000_ERR_CONFIG;
        }

        hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
        if (!speed) {
            DEBUGOUT("Speed not set.\n");
            return -E1000_ERR_CONFIG;
        }

        /* Rx Packet Buffer Allocation size (KB) */
        rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

        /* Determine the maximum latency tolerated by the device.
         *
         * Per the PCIe spec, the tolerated latencies are encoded as
         * a 3-bit encoded scale (only 0-5 are valid) multiplied by
         * a 10-bit value (0-1023) to provide a range from 1 ns to
         * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
         * 1=2^5ns, 2=2^10ns,...5=2^25ns.
         */
        lat_ns = ((s64)rxa * 1024 -
                  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
        if (lat_ns < 0)
            lat_ns = 0;
        else
            lat_ns /= speed;
        value = lat_ns;

        while (value > E1000_LTRV_VALUE_MASK) {
            scale++;
            value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
        }
        if (scale > E1000_LTRV_SCALE_MAX) {
            DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
            return -E1000_ERR_CONFIG;
        }
        lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);

        /* Determine the maximum latency tolerated by the platform */
        e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
        e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
        max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

        if (lat_enc > max_ltr_enc) {
            lat_enc = max_ltr_enc;
            lat_ns = e1000_ltr2ns(max_ltr_enc);
        }

        if (lat_ns) {
            lat_ns *= speed * 1000;
            lat_ns /= 8;
            lat_ns /= 1000000000;
            obff_hwm = (s32)(rxa - lat_ns);
        }
        if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
            DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
            return -E1000_ERR_CONFIG;
        }
    }

    /* Set Snoop and No-Snoop latencies the same */
    reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
    E1000_WRITE_REG(hw, E1000_LTRV, reg);

    /* Set OBFF high water mark */
    reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
    reg |= obff_hwm;
    E1000_WRITE_REG(hw, E1000_SVT, reg);

    /* Enable OBFF */
    reg = E1000_READ_REG(hw, E1000_SVCR);
    reg |= E1000_SVCR_OFF_EN;
    /* Always unblock interrupts to the CPU even when the system is
     * in OBFF mode. This ensures that small round-robin traffic
     * (like ping) does not get dropped or experience long latency.
     */
    reg |= E1000_SVCR_OFF_MASKINT;
    E1000_WRITE_REG(hw, E1000_SVCR, reg);

    return E1000_SUCCESS;
}

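/* Illustrative worked example of the encoding loop above, assuming a 24 KB
 * Rx allocation (rxa = 24), a 1522-byte max frame, a 1000 Mb/s link, and a
 * 10-bit E1000_LTRV_SCALE_SHIFT as decoded by e1000_ltr2ns():
 * lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000 = 172256 ns.  That
 * exceeds the 10-bit value mask (1023), so the loop divides by 2^5 twice,
 * giving scale = 2 and value = 169; lat_enc is then (2 << 10) | 169, which
 * decodes back to 169 * 1024 = 173056 ns, just above the computed tolerance
 * as expected for a round-up encoding.
 */
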
/**
 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 * @hw: pointer to the HW structure
 * @itr: interrupt throttling rate
 *
 * Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
    u32 svcr;
    s32 timer;

    DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

    /* Convert ITR value into microseconds for OBFF timer */
    timer = itr & E1000_ITR_MASK;
    timer = (timer * E1000_ITR_MULT) / 1000;

    if ((timer < 0) || (timer > E1000_ITR_MASK)) {
        DEBUGOUT1("Invalid OBFF timer %d\n", timer);
        return -E1000_ERR_CONFIG;
    }

    svcr = E1000_READ_REG(hw, E1000_SVCR);
    svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
    svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
    E1000_WRITE_REG(hw, E1000_SVCR, svcr);

    return E1000_SUCCESS;
}

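/* Illustrative worked example of the conversion above, assuming an
 * E1000_ITR_MULT of 256 (i.e. the ITR count is in 256 ns units): an ITR
 * count of 500 becomes (500 * 256) / 1000 = 128 usec, which is then
 * range-checked and programmed into the SVCR OBFF timer field.
 */
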
/**
 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @to_sx: boolean indicating a system power state transition to Sx
 *
 * When link is down, configure ULP mode to significantly reduce the power
 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
 * ME firmware to start the ULP configuration. If not on an ME enabled
 * system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
    u32 mac_reg;
    s32 ret_val = E1000_SUCCESS;
    u16 phy_reg;
    u16 oem_reg = 0;

    if ((hw->mac.type < e1000_pch_lpt) ||
        (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
        (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
        (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
        (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
        (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
        return 0;

    if (!to_sx) {
        int i = 0;

        /* Poll up to 5 seconds for Cable Disconnected indication */
        while (!(E1000_READ_REG(hw, E1000_FEXT) &
                 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
            /* Bail if link is re-acquired */
            if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
                return -E1000_ERR_PHY;
            if (i++ == 100)
                break;

            msec_delay(50);
        }
        DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
                  (E1000_READ_REG(hw, E1000_FEXT) &
                   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
                  i * 50);
        if (!(E1000_READ_REG(hw, E1000_FEXT) &
              E1000_FEXT_PHY_CABLE_DISCONNECTED))
            return 0;
    }

    if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
        /* Request ME configure ULP mode in the PHY */
        mac_reg = E1000_READ_REG(hw, E1000_H2ME);
        mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
        E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

        goto out;
    }

    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        goto out;

    /* During S0 Idle keep the phy in PCI-E mode */
    if (hw->dev_spec.ich8lan.smbus_disable)
        goto skip_smbus;

    /* Force SMBus mode in PHY */
    ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
    if (ret_val)
        goto release;
    phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
    e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

    /* Force SMBus mode in MAC */
    mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
    mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
    E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

    /* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
     * LPLU and disable Gig speed when entering ULP
     */
    if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
                                               &oem_reg);
        if (ret_val)
            goto release;

        phy_reg = oem_reg;
        phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

        ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
                                                phy_reg);
        if (ret_val)
            goto release;
    }

skip_smbus:
    if (!to_sx) {
        /* Change the 'Link Status Change' interrupt to trigger
         * on 'Cable Status Change'
         */
        ret_val = e1000_read_kmrn_reg_locked(hw,
                                             E1000_KMRNCTRLSTA_OP_MODES,
                                             &phy_reg);
        if (ret_val)
            goto release;
        phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
        e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
                                    phy_reg);
    }

    /* Set Inband ULP Exit, Reset to SMBus mode and
     * Disable SMBus Release on PERST# in PHY
     */
    ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
    if (ret_val)
        goto release;
    phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
                I218_ULP_CONFIG1_DISABLE_SMB_PERST);
    if (to_sx) {
        if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
            phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
        else
            phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

        phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
        phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
    } else {
        phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
        phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
        phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
    }
    e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

    /* Set Disable SMBus Release on PERST# in MAC */
    mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
    mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
    E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

    /* Commit ULP changes in PHY by starting auto ULP configuration */
    phy_reg |= I218_ULP_CONFIG1_START;
    e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

    if (!to_sx) {
        /* Disable Tx so that the MAC doesn't send any (buffered)
         * packets to the PHY.
         */
        mac_reg = E1000_READ_REG(hw, E1000_TCTL);
        mac_reg &= ~E1000_TCTL_EN;
        E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
    }

    if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
        to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
        ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
                                                oem_reg);
        if (ret_val)
            goto release;
    }

release:
    hw->phy.ops.release(hw);
out:
    if (ret_val)
        DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
    else
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

    return ret_val;
}

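/* Illustrative sketch: the ULP entry above and the exit below both branch on
 * the same firmware-validity test; on ME-managed parts the host only kicks
 * the H2ME mailbox, while otherwise software programs the PHY directly.  A
 * hypothetical restatement of that dispatch:
 */
static inline bool e1000_example_ulp_handled_by_me(struct e1000_hw *hw)
{
    return !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID);
}
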
/**
 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @force: boolean indicating whether or not to force disabling ULP
 *
 * Un-configure ULP mode when link is up, when the system is transitioned
 * from Sx, or when the driver is unloaded. If on a Manageability Engine
 * (ME) enabled system, poll for an indication from ME that ULP has been
 * un-configured. If not on an ME enabled system, un-configure the ULP mode
 * by software.
 *
 * During nominal operation, this function is called when link is acquired
 * to disable ULP mode (force=FALSE); otherwise, for example when unloading
 * the driver or during Sx->S0 transitions, this is called with force=TRUE
 * to forcibly disable ULP.
 *
 * When the cable is plugged in while the device is in D0, a Cable Status
 * Change interrupt is generated which causes this function to be called
 * to partially disable ULP mode and restart autonegotiation. This function
 * is then called again due to the resulting Link Status Change interrupt
 * to finish cleaning up after the ULP flow.
 */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
    s32 ret_val = E1000_SUCCESS;
    u8 ulp_exit_timeout = 30;
    u32 mac_reg;
    u16 phy_reg;
    int i = 0;

    if ((hw->mac.type < e1000_pch_lpt) ||
        (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
        (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
        (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
        (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
        (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
        return 0;

    if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
        if (force) {
            /* Request ME un-configure ULP mode in the PHY */
            mac_reg = E1000_READ_REG(hw, E1000_H2ME);
            mac_reg &= ~E1000_H2ME_ULP;
            mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
            E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
        }

        if (hw->mac.type == e1000_pch_cnp)
            ulp_exit_timeout = 100;

        while (E1000_READ_REG(hw, E1000_FWSM) &
               E1000_FWSM_ULP_CFG_DONE) {
            if (i++ == ulp_exit_timeout) {
                ret_val = -E1000_ERR_PHY;
                goto out;
            }

            msec_delay(10);
        }
        DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

        if (force) {
            mac_reg = E1000_READ_REG(hw, E1000_H2ME);
            mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
            E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
        } else {
            /* Clear H2ME.ULP after ME ULP configuration */
            mac_reg = E1000_READ_REG(hw, E1000_H2ME);
            mac_reg &= ~E1000_H2ME_ULP;
            E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

            /* Restore link speed advertisements and restart
             * Auto-negotiation
             */
            if (hw->mac.autoneg) {
                ret_val = e1000_phy_setup_autoneg(hw);
                if (ret_val)
                    goto out;
            } else {
                ret_val = e1000_setup_copper_link_generic(hw);
                if (ret_val)
                    goto out;
            }
            ret_val = e1000_oem_bits_config_ich8lan(hw, true);
        }

        goto out;
    }

    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val)
        goto out;

    /* Revert the change to the 'Link Status Change'
     * interrupt to trigger on 'Cable Status Change'
     */
    ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
                                         &phy_reg);
    if (ret_val)
        goto release;
    phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
    e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);

    if (force)
        /* Toggle LANPHYPC Value bit */
        e1000_toggle_lanphypc_pch_lpt(hw);

    /* Unforce SMBus mode in PHY */
    ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
    if (ret_val) {
        /* The MAC might be in PCIe mode, so temporarily force to
         * SMBus mode in order to access the PHY.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

        msec_delay(50);

        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
                                               &phy_reg);
        if (ret_val)
            goto release;
    }
    phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
    e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

    /* Unforce SMBus mode in MAC */
    mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
    mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
    E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

    /* When ULP mode was previously entered, K1 was disabled by the
     * hardware. Re-Enable K1 in the PHY when exiting ULP.
     */
    ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
    if (ret_val)
        goto release;
    phy_reg |= HV_PM_CTRL_K1_ENABLE;
    e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

    /* Clear ULP enabled configuration */
    ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
    if (ret_val)
        goto release;
    /* CSC interrupt received due to ULP Indication */
    if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
        phy_reg &= ~(I218_ULP_CONFIG1_IND |
                     I218_ULP_CONFIG1_STICKY_ULP |
                     I218_ULP_CONFIG1_RESET_TO_SMBUS |
                     I218_ULP_CONFIG1_WOL_HOST |
                     I218_ULP_CONFIG1_INBAND_EXIT |
                     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
                     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

        /* Commit ULP changes by starting auto ULP configuration */
        phy_reg |= I218_ULP_CONFIG1_START;
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

        /* Clear Disable SMBus Release on PERST# in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
        mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
        E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

        if (!force) {
            hw->phy.ops.release(hw);

            if (hw->mac.autoneg)
                e1000_phy_setup_autoneg(hw);
            else
                e1000_setup_copper_link_generic(hw);

            e1000_sw_lcd_config_ich8lan(hw);

            e1000_oem_bits_config_ich8lan(hw, true);

            /* Set ULP state to unknown and return non-zero to
             * indicate no link (yet) and re-enter on the next LSC
             * to finish disabling ULP flow.
             */
            hw->dev_spec.ich8lan.ulp_state =
                e1000_ulp_state_unknown;

            return 1;
        }
    }

    /* Re-enable Tx */
    mac_reg = E1000_READ_REG(hw, E1000_TCTL);
    mac_reg |= E1000_TCTL_EN;
    E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);

release:
    hw->phy.ops.release(hw);
    if (force) {
        hw->phy.ops.reset(hw);
        msec_delay(50);
    }
out:
    if (ret_val)
        DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
    else
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

    return ret_val;
}

/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed. If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
    struct e1000_mac_info *mac = &hw->mac;
    s32 ret_val, tipg_reg = 0;
    u16 emi_addr, emi_val = 0;
    bool link = false;
    u16 phy_reg;

    DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

    /* We only want to go out to the PHY registers to see if Auto-Neg
     * has completed and/or if our link status has changed. The
     * get_link_status flag is set upon receiving a Link Status
     * Change or Rx Sequence Error interrupt.
     */
    if (!mac->get_link_status)
        return E1000_SUCCESS;

    if ((hw->mac.type < e1000_pch_lpt) ||
        (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
        (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
        /* First we want to see if the MII Status Register reports
         * link. If so, then we want to get the current speed/duplex
         * of the PHY.
         */
        ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
        if (ret_val)
            return ret_val;
    } else {
        /* Check the MAC's STATUS register to determine link state
         * since the PHY could be inaccessible while in ULP mode.
         */
        link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
        if (link)
            ret_val = e1000_disable_ulp_lpt_lp(hw, false);
        else
            ret_val = e1000_enable_ulp_lpt_lp(hw, false);
        if (ret_val)
            return ret_val;
    }

    if (hw->mac.type == e1000_pchlan) {
        ret_val = e1000_k1_gig_workaround_hv(hw, link);
        if (ret_val)
            return ret_val;
    }

    /* When connected at 10Mbps half-duplex, some parts are excessively
     * aggressive resulting in many collisions. To avoid this, increase
     * the IPG and reduce Rx latency in the PHY.
     */
    if ((hw->mac.type >= e1000_pch2lan) && link) {
        u16 speed, duplex;

        e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
        tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
        tipg_reg &= ~E1000_TIPG_IPGT_MASK;

        if (duplex == HALF_DUPLEX && speed == SPEED_10) {
            tipg_reg |= 0xFF;
            /* Reduce Rx latency in analog PHY */
            emi_val = 0;
        } else if (hw->mac.type >= e1000_pch_spt &&
                   duplex == FULL_DUPLEX && speed != SPEED_1000) {
            tipg_reg |= 0xC;
            emi_val = 1;
        } else {
            /* Roll back the default values */
            tipg_reg |= 0x08;
            emi_val = 1;
        }

        E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
            return ret_val;

        if (hw->mac.type == e1000_pch2lan)
            emi_addr = I82579_RX_CONFIG;
        else
            emi_addr = I217_RX_CONFIG;
        ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);

        if (hw->mac.type >= e1000_pch_lpt) {
            hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
                                        &phy_reg);
            phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
            if (speed == SPEED_100 || speed == SPEED_10)
                phy_reg |= 0x3E8;
            else
                phy_reg |= 0xFA;
            hw->phy.ops.write_reg_locked(hw,
                                         I217_PLL_CLOCK_GATE_REG,
                                         phy_reg);

            if (speed == SPEED_1000) {
                hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
                                            &phy_reg);

                phy_reg |= HV_PM_CTRL_K1_CLK_REQ;

                hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
                                             phy_reg);
            }
        }
        hw->phy.ops.release(hw);

        if (ret_val)
            return ret_val;

        if (hw->mac.type >= e1000_pch_spt) {
            u16 data;
            u16 ptr_gap;

            if (speed == SPEED_1000) {
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                    return ret_val;

                ret_val = hw->phy.ops.read_reg_locked(hw,
                                                      PHY_REG(776, 20),
                                                      &data);
                if (ret_val) {
                    hw->phy.ops.release(hw);
                    return ret_val;
                }

                ptr_gap = (data & (0x3FF << 2)) >> 2;
                if (ptr_gap < 0x18) {
                    data &= ~(0x3FF << 2);
                    data |= (0x18 << 2);
                    ret_val =
                        hw->phy.ops.write_reg_locked(hw,
                                                     PHY_REG(776, 20),
                                                     data);
                }
                hw->phy.ops.release(hw);
                if (ret_val)
                    return ret_val;
            } else {
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                    return ret_val;

                ret_val = hw->phy.ops.write_reg_locked(hw,
                                                       PHY_REG(776, 20),
                                                       0xC023);
                hw->phy.ops.release(hw);
                if (ret_val)
                    return ret_val;
            }
        }
    }

    /* I217 Packet Loss issue:
     * ensure that FEXTNVM4 Beacon Duration is set correctly
     * on power up.
     * Set the Beacon Duration for I217 to 8 usec
     */
    if (hw->mac.type >= e1000_pch_lpt) {
        u32 mac_reg;

        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
        mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
        mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
        E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
    }

    /* Work-around I218 hang issue */
    if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
        (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
        (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
        (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
        ret_val = e1000_k1_workaround_lpt_lp(hw, link);
        if (ret_val)
            return ret_val;
    }
    if (hw->mac.type >= e1000_pch_lpt) {
        /* Set platform power management values for
         * Latency Tolerance Reporting (LTR)
         * Optimized Buffer Flush/Fill (OBFF)
         */
        ret_val = e1000_platform_pm_pch_lpt(hw, link);
        if (ret_val)
            return ret_val;
    }

    /* Clear link partner's EEE ability */
    hw->dev_spec.ich8lan.eee_lp_ability = 0;

    /* Configure K0s minimum time */
    if (hw->mac.type >= e1000_pch_lpt) {
        e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
    }

    if (hw->mac.type >= e1000_pch_lpt) {
        u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

        if (hw->mac.type == e1000_pch_spt) {
            /* FEXTNVM6 K1-off workaround - for SPT only */
            u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);

            if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
                fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
            else
                fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
        }

        if (hw->dev_spec.ich8lan.disable_k1_off == TRUE)
            fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;

        E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
    }

    if (!link)
        return E1000_SUCCESS; /* No link detected */

    mac->get_link_status = FALSE;

    switch (hw->mac.type) {
    case e1000_pch2lan:
        ret_val = e1000_k1_workaround_lv(hw);
        if (ret_val)
            return ret_val;
        /* fall-through */
    case e1000_pchlan:
        if (hw->phy.type == e1000_phy_82578) {
            ret_val = e1000_link_stall_workaround_hv(hw);
            if (ret_val)
                return ret_val;
        }

        /* Workaround for PCHx parts in half-duplex:
         * Set the number of preambles removed from the packet
         * when it is passed from the PHY to the MAC to prevent
         * the MAC from misinterpreting the packet type.
         */
        hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
        phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

        if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
            E1000_STATUS_FD)
            phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

        hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
        break;
    default:
        break;
    }

(ret_val) 1773 return ret_val; 1774 1775 ret_val = hw->phy.ops.read_reg_locked(hw, 1776 PHY_REG(776, 20), 1777 &data); 1778 if (ret_val) { 1779 hw->phy.ops.release(hw); 1780 return ret_val; 1781 } 1782 1783 ptr_gap = (data & (0x3FF << 2)) >> 2; 1784 if (ptr_gap < 0x18) { 1785 data &= ~(0x3FF << 2); 1786 data |= (0x18 << 2); 1787 ret_val = 1788 hw->phy.ops.write_reg_locked(hw, 1789 PHY_REG(776, 20), data); 1790 } 1791 hw->phy.ops.release(hw); 1792 if (ret_val) 1793 return ret_val; 1794 } else { 1795 ret_val = hw->phy.ops.acquire(hw); 1796 if (ret_val) 1797 return ret_val; 1798 1799 ret_val = hw->phy.ops.write_reg_locked(hw, 1800 PHY_REG(776, 20), 1801 0xC023); 1802 hw->phy.ops.release(hw); 1803 if (ret_val) 1804 return ret_val; 1805 1806 } 1807 } 1808 } 1809 1810 /* I217 Packet Loss issue: 1811 * ensure that FEXTNVM4 Beacon Duration is set correctly 1812 * on power up. 1813 * Set the Beacon Duration for I217 to 8 usec 1814 */ 1815 if (hw->mac.type >= e1000_pch_lpt) { 1816 u32 mac_reg; 1817 1818 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); 1819 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 1820 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 1821 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); 1822 } 1823 1824 /* Work-around I218 hang issue */ 1825 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || 1826 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || 1827 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) || 1828 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) { 1829 ret_val = e1000_k1_workaround_lpt_lp(hw, link); 1830 if (ret_val) 1831 return ret_val; 1832 } 1833 if (hw->mac.type >= e1000_pch_lpt) { 1834 /* Set platform power management values for 1835 * Latency Tolerance Reporting (LTR) 1836 * Optimized Buffer Flush/Fill (OBFF) 1837 */ 1838 ret_val = e1000_platform_pm_pch_lpt(hw, link); 1839 if (ret_val) 1840 return ret_val; 1841 } 1842 1843 /* Clear link partner's EEE ability */ 1844 hw->dev_spec.ich8lan.eee_lp_ability = 0; 1845 1846 /* Configure K0s minimum time */ 1847 if (hw->mac.type >= e1000_pch_lpt) { 1848 e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME); 1849 } 1850 1851 if (hw->mac.type >= e1000_pch_lpt) { 1852 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); 1853 1854 if (hw->mac.type == e1000_pch_spt) { 1855 /* FEXTNVM6 K1-off workaround - for SPT only */ 1856 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG); 1857 1858 if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) 1859 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; 1860 else 1861 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; 1862 } 1863 1864 if (hw->dev_spec.ich8lan.disable_k1_off == TRUE) 1865 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; 1866 1867 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); 1868 } 1869 1870 if (!link) 1871 return E1000_SUCCESS; /* No link detected */ 1872 1873 mac->get_link_status = FALSE; 1874 1875 switch (hw->mac.type) { 1876 case e1000_pch2lan: 1877 ret_val = e1000_k1_workaround_lv(hw); 1878 if (ret_val) 1879 return ret_val; 1880 /* fall-thru */ 1881 case e1000_pchlan: 1882 if (hw->phy.type == e1000_phy_82578) { 1883 ret_val = e1000_link_stall_workaround_hv(hw); 1884 if (ret_val) 1885 return ret_val; 1886 } 1887 1888 /* Workaround for PCHx parts in half-duplex: 1889 * Set the number of preambles removed from the packet 1890 * when it is passed from the PHY to the MAC to prevent 1891 * the MAC from misinterpreting the packet type. 
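 * (Only one removed preamble is programmed, and only when the MAC STATUS
 * register reports half-duplex; in full-duplex the field is left at zero.)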
1892 */ 1893 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); 1894 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; 1895 1896 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) != 1897 E1000_STATUS_FD) 1898 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); 1899 1900 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); 1901 break; 1902 default: 1903 break; 1904 } 1905 1906 /* Check if there was DownShift, must be checked 1907 * immediately after link-up 1908 */ 1909 e1000_check_downshift_generic(hw); 1910 1911 /* Enable/Disable EEE after link up */ 1912 if (hw->phy.type > e1000_phy_82579) { 1913 ret_val = e1000_set_eee_pchlan(hw); 1914 if (ret_val) 1915 return ret_val; 1916 } 1917 1918 /* If we are forcing speed/duplex, then we simply return since 1919 * we have already determined whether we have link or not. 1920 */ 1921 if (!mac->autoneg) 1922 return -E1000_ERR_CONFIG; 1923 1924 /* Auto-Neg is enabled. Auto Speed Detection takes care 1925 * of MAC speed/duplex configuration. So we only need to 1926 * configure Collision Distance in the MAC. 1927 */ 1928 mac->ops.config_collision_dist(hw); 1929 1930 /* Configure Flow Control now that Auto-Neg has completed. 1931 * First, we need to restore the desired flow control 1932 * settings because we may have had to re-autoneg with a 1933 * different link partner. 1934 */ 1935 ret_val = e1000_config_fc_after_link_up_generic(hw); 1936 if (ret_val) 1937 DEBUGOUT("Error configuring flow control\n"); 1938 1939 return ret_val; 1940 } 1941 1942 /** 1943 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers 1944 * @hw: pointer to the HW structure 1945 * 1946 * Initialize family-specific function pointers for PHY, MAC, and NVM. 1947 **/ 1948 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw) 1949 { 1950 DEBUGFUNC("e1000_init_function_pointers_ich8lan"); 1951 1952 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan; 1953 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan; 1954 switch (hw->mac.type) { 1955 case e1000_ich8lan: 1956 case e1000_ich9lan: 1957 case e1000_ich10lan: 1958 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan; 1959 break; 1960 case e1000_pchlan: 1961 case e1000_pch2lan: 1962 case e1000_pch_lpt: 1963 case e1000_pch_spt: 1964 case e1000_pch_cnp: 1965 case e1000_pch_tgp: 1966 case e1000_pch_adp: 1967 case e1000_pch_mtp: 1968 hw->phy.ops.init_params = e1000_init_phy_params_pchlan; 1969 break; 1970 default: 1971 break; 1972 } 1973 } 1974 1975 /** 1976 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex 1977 * @hw: pointer to the HW structure 1978 * 1979 * Acquires the mutex for performing NVM operations. 1980 **/ 1981 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) 1982 { 1983 DEBUGFUNC("e1000_acquire_nvm_ich8lan"); 1984 1985 ASSERT_CTX_LOCK_HELD(hw); 1986 1987 return E1000_SUCCESS; 1988 } 1989 1990 /** 1991 * e1000_release_nvm_ich8lan - Release NVM mutex 1992 * @hw: pointer to the HW structure 1993 * 1994 * Releases the mutex used while performing NVM operations. 1995 **/ 1996 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) 1997 { 1998 DEBUGFUNC("e1000_release_nvm_ich8lan"); 1999 2000 ASSERT_CTX_LOCK_HELD(hw); 2001 } 2002 2003 /** 2004 * e1000_acquire_swflag_ich8lan - Acquire software control flag 2005 * @hw: pointer to the HW structure 2006 * 2007 * Acquires the software control flag for performing PHY and select 2008 * MAC CSR accesses. 
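 *
 * The flag is taken in two steps: first wait up to PHY_CFG_TIMEOUT ms for
 * the flag to become free, then set it and wait up to SW_FLAG_TIMEOUT ms
 * for hardware to confirm ownership, since firmware or hardware may still
 * be holding the bit.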
2009 **/ 2010 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) 2011 { 2012 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; 2013 s32 ret_val = E1000_SUCCESS; 2014 2015 DEBUGFUNC("e1000_acquire_swflag_ich8lan"); 2016 2017 ASSERT_CTX_LOCK_HELD(hw); 2018 2019 while (timeout) { 2020 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 2021 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) 2022 break; 2023 2024 msec_delay_irq(1); 2025 timeout--; 2026 } 2027 2028 if (!timeout) { 2029 DEBUGOUT("SW has already locked the resource.\n"); 2030 ret_val = -E1000_ERR_CONFIG; 2031 goto out; 2032 } 2033 2034 timeout = SW_FLAG_TIMEOUT; 2035 2036 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 2037 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 2038 2039 while (timeout) { 2040 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 2041 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 2042 break; 2043 2044 msec_delay_irq(1); 2045 timeout--; 2046 } 2047 2048 if (!timeout) { 2049 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n", 2050 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl); 2051 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 2052 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 2053 ret_val = -E1000_ERR_CONFIG; 2054 goto out; 2055 } 2056 2057 out: 2058 return ret_val; 2059 } 2060 2061 /** 2062 * e1000_release_swflag_ich8lan - Release software control flag 2063 * @hw: pointer to the HW structure 2064 * 2065 * Releases the software control flag for performing PHY and select 2066 * MAC CSR accesses. 2067 **/ 2068 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) 2069 { 2070 u32 extcnf_ctrl; 2071 2072 DEBUGFUNC("e1000_release_swflag_ich8lan"); 2073 2074 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 2075 2076 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { 2077 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 2078 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 2079 } else { 2080 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n"); 2081 } 2082 } 2083 2084 /** 2085 * e1000_check_mng_mode_ich8lan - Checks management mode 2086 * @hw: pointer to the HW structure 2087 * 2088 * This checks if the adapter has any manageability enabled. 2089 * This is a function pointer entry point only called by read/write 2090 * routines for the PHY and NVM parts. 2091 **/ 2092 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) 2093 { 2094 u32 fwsm; 2095 2096 DEBUGFUNC("e1000_check_mng_mode_ich8lan"); 2097 2098 fwsm = E1000_READ_REG(hw, E1000_FWSM); 2099 2100 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 2101 ((fwsm & E1000_FWSM_MODE_MASK) == 2102 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 2103 } 2104 2105 /** 2106 * e1000_check_mng_mode_pchlan - Checks management mode 2107 * @hw: pointer to the HW structure 2108 * 2109 * This checks if the adapter has iAMT enabled. 2110 * This is a function pointer entry point only called by read/write 2111 * routines for the PHY and NVM parts. 
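 * Unlike the ICH8 variant above, only the iAMT mode bit is tested here
 * rather than comparing the entire FWSM mode field.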
2112 **/ 2113 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) 2114 { 2115 u32 fwsm; 2116 2117 DEBUGFUNC("e1000_check_mng_mode_pchlan"); 2118 2119 fwsm = E1000_READ_REG(hw, E1000_FWSM); 2120 2121 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 2122 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 2123 } 2124 2125 /** 2126 * e1000_rar_set_pch2lan - Set receive address register 2127 * @hw: pointer to the HW structure 2128 * @addr: pointer to the receive address 2129 * @index: receive address array register 2130 * 2131 * Sets the receive address array register at index to the address passed 2132 * in by addr. For 82579, RAR[0] is the base address register that is to 2133 * contain the MAC address but RAR[1-6] are reserved for manageability (ME). 2134 * Use SHRA[0-3] in place of those reserved for ME. 2135 **/ 2136 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) 2137 { 2138 u32 rar_low, rar_high; 2139 2140 DEBUGFUNC("e1000_rar_set_pch2lan"); 2141 2142 /* HW expects these in little endian so we reverse the byte order 2143 * from network order (big endian) to little endian 2144 */ 2145 rar_low = ((u32) addr[0] | 2146 ((u32) addr[1] << 8) | 2147 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); 2148 2149 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 2150 2151 /* If MAC address zero, no need to set the AV bit */ 2152 if (rar_low || rar_high) 2153 rar_high |= E1000_RAH_AV; 2154 2155 if (index == 0) { 2156 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); 2157 E1000_WRITE_FLUSH(hw); 2158 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); 2159 E1000_WRITE_FLUSH(hw); 2160 return E1000_SUCCESS; 2161 } 2162 2163 /* RAR[1-6] are owned by manageability. Skip those and program the 2164 * next address into the SHRA register array. 2165 */ 2166 if (index < (u32) (hw->mac.rar_entry_count)) { 2167 s32 ret_val; 2168 2169 ret_val = e1000_acquire_swflag_ich8lan(hw); 2170 if (ret_val) 2171 goto out; 2172 2173 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low); 2174 E1000_WRITE_FLUSH(hw); 2175 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high); 2176 E1000_WRITE_FLUSH(hw); 2177 2178 e1000_release_swflag_ich8lan(hw); 2179 2180 /* verify the register updates */ 2181 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) && 2182 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high)) 2183 return E1000_SUCCESS; 2184 2185 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", 2186 (index - 1), E1000_READ_REG(hw, E1000_FWSM)); 2187 } 2188 2189 out: 2190 DEBUGOUT1("Failed to write receive address at index %d\n", index); 2191 return -E1000_ERR_CONFIG; 2192 } 2193 2194 /** 2195 * e1000_rar_set_pch_lpt - Set receive address registers 2196 * @hw: pointer to the HW structure 2197 * @addr: pointer to the receive address 2198 * @index: receive address array register 2199 * 2200 * Sets the receive address register array at index to the address passed 2201 * in by addr. For LPT, RAR[0] is the base address register that is to 2202 * contain the MAC address. SHRA[0-10] are the shared receive address 2203 * registers that are shared between the Host and manageability engine (ME). 
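 *
 * As an illustration (arbitrary address, not read from hardware), the MAC
 * address 00:1b:21:aa:bb:cc is packed little endian as rar_low = 0xaa211b00
 * and rar_high = 0x0000ccbb before the Address Valid bit is OR'd in.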
2204 **/ 2205 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) 2206 { 2207 u32 rar_low, rar_high; 2208 u32 wlock_mac; 2209 2210 DEBUGFUNC("e1000_rar_set_pch_lpt"); 2211 2212 /* HW expects these in little endian so we reverse the byte order 2213 * from network order (big endian) to little endian 2214 */ 2215 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | 2216 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); 2217 2218 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 2219 2220 /* If MAC address zero, no need to set the AV bit */ 2221 if (rar_low || rar_high) 2222 rar_high |= E1000_RAH_AV; 2223 2224 if (index == 0) { 2225 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); 2226 E1000_WRITE_FLUSH(hw); 2227 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); 2228 E1000_WRITE_FLUSH(hw); 2229 return E1000_SUCCESS; 2230 } 2231 2232 /* The manageability engine (ME) can lock certain SHRAR registers that 2233 * it is using - those registers are unavailable for use. 2234 */ 2235 if (index < hw->mac.rar_entry_count) { 2236 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) & 2237 E1000_FWSM_WLOCK_MAC_MASK; 2238 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; 2239 2240 /* Check if all SHRAR registers are locked */ 2241 if (wlock_mac == 1) 2242 goto out; 2243 2244 if ((wlock_mac == 0) || (index <= wlock_mac)) { 2245 s32 ret_val; 2246 2247 ret_val = e1000_acquire_swflag_ich8lan(hw); 2248 2249 if (ret_val) 2250 goto out; 2251 2252 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1), 2253 rar_low); 2254 E1000_WRITE_FLUSH(hw); 2255 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1), 2256 rar_high); 2257 E1000_WRITE_FLUSH(hw); 2258 2259 e1000_release_swflag_ich8lan(hw); 2260 2261 /* verify the register updates */ 2262 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) && 2263 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high)) 2264 return E1000_SUCCESS; 2265 } 2266 } 2267 2268 out: 2269 DEBUGOUT1("Failed to write receive address at index %d\n", index); 2270 return -E1000_ERR_CONFIG; 2271 } 2272 2273 /** 2274 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses 2275 * @hw: pointer to the HW structure 2276 * @mc_addr_list: array of multicast addresses to program 2277 * @mc_addr_count: number of multicast addresses to program 2278 * 2279 * Updates entire Multicast Table Array of the PCH2 MAC and PHY. 2280 * The caller must have a packed mc_addr_list of multicast addresses. 2281 **/ 2282 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, 2283 u8 *mc_addr_list, 2284 u32 mc_addr_count) 2285 { 2286 u16 phy_reg = 0; 2287 int i; 2288 s32 ret_val; 2289 2290 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan"); 2291 2292 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count); 2293 2294 ret_val = hw->phy.ops.acquire(hw); 2295 if (ret_val) 2296 return; 2297 2298 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); 2299 if (ret_val) 2300 goto release; 2301 2302 for (i = 0; i < hw->mac.mta_reg_count; i++) { 2303 hw->phy.ops.write_reg_page(hw, BM_MTA(i), 2304 (u16)(hw->mac.mta_shadow[i] & 2305 0xFFFF)); 2306 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1), 2307 (u16)((hw->mac.mta_shadow[i] >> 16) & 2308 0xFFFF)); 2309 } 2310 2311 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); 2312 2313 release: 2314 hw->phy.ops.release(hw); 2315 } 2316 2317 /** 2318 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 2319 * @hw: pointer to the HW structure 2320 * 2321 * Checks if firmware is blocking the reset of the PHY. 
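 * The FWSM RSPCIPHY bit is polled for up to roughly 300 ms (30 retries
 * with a 10 ms delay) before the reset is reported as blocked.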
2322 * This is a function pointer entry point only called by 2323 * reset routines. 2324 **/ 2325 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) 2326 { 2327 u32 fwsm; 2328 bool blocked = FALSE; 2329 int i = 0; 2330 2331 DEBUGFUNC("e1000_check_reset_block_ich8lan"); 2332 2333 do { 2334 fwsm = E1000_READ_REG(hw, E1000_FWSM); 2335 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) { 2336 blocked = TRUE; 2337 msec_delay(10); 2338 continue; 2339 } 2340 blocked = FALSE; 2341 } while (blocked && (i++ < 30)); 2342 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS; 2343 } 2344 2345 /** 2346 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states 2347 * @hw: pointer to the HW structure 2348 * 2349 * Assumes semaphore already acquired. 2350 * 2351 **/ 2352 static s32 e1000_write_smbus_addr(struct e1000_hw *hw) 2353 { 2354 u16 phy_data; 2355 u32 strap = E1000_READ_REG(hw, E1000_STRAP); 2356 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> 2357 E1000_STRAP_SMT_FREQ_SHIFT; 2358 s32 ret_val; 2359 2360 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 2361 2362 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); 2363 if (ret_val) 2364 return ret_val; 2365 2366 phy_data &= ~HV_SMB_ADDR_MASK; 2367 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 2368 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 2369 2370 if (hw->phy.type == e1000_phy_i217) { 2371 /* Restore SMBus frequency */ 2372 if (freq--) { 2373 phy_data &= ~HV_SMB_ADDR_FREQ_MASK; 2374 phy_data |= (freq & (1 << 0)) << 2375 HV_SMB_ADDR_FREQ_LOW_SHIFT; 2376 phy_data |= (freq & (1 << 1)) << 2377 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); 2378 } else { 2379 DEBUGOUT("Unsupported SMB frequency in PHY\n"); 2380 } 2381 } 2382 2383 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 2384 } 2385 2386 /** 2387 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 2388 * @hw: pointer to the HW structure 2389 * 2390 * SW should configure the LCD from the NVM extended configuration region 2391 * as a workaround for certain parts. 2392 **/ 2393 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 2394 { 2395 struct e1000_phy_info *phy = &hw->phy; 2396 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 2397 s32 ret_val = E1000_SUCCESS; 2398 u16 word_addr, reg_data, reg_addr, phy_page = 0; 2399 2400 DEBUGFUNC("e1000_sw_lcd_config_ich8lan"); 2401 2402 /* Initialize the PHY from the NVM on ICH platforms. This 2403 * is needed due to an issue where the NVM configuration is 2404 * not properly autoloaded after power transitions. 2405 * Therefore, after each PHY reset, we will load the 2406 * configuration data out of the NVM manually. 
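 * The extended configuration region is consumed below as (data, address)
 * word pairs; PHY page-select entries are cached and folded into the
 * register addresses of the writes that follow them.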
2407 */ 2408 switch (hw->mac.type) { 2409 case e1000_ich8lan: 2410 if (phy->type != e1000_phy_igp_3) 2411 return ret_val; 2412 2413 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) || 2414 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) { 2415 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 2416 break; 2417 } 2418 /* Fall-thru */ 2419 case e1000_pchlan: 2420 case e1000_pch2lan: 2421 case e1000_pch_lpt: 2422 case e1000_pch_spt: 2423 case e1000_pch_cnp: 2424 case e1000_pch_tgp: 2425 case e1000_pch_adp: 2426 case e1000_pch_mtp: 2427 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 2428 break; 2429 default: 2430 return ret_val; 2431 } 2432 2433 ret_val = hw->phy.ops.acquire(hw); 2434 if (ret_val) 2435 return ret_val; 2436 2437 data = E1000_READ_REG(hw, E1000_FEXTNVM); 2438 if (!(data & sw_cfg_mask)) 2439 goto release; 2440 2441 /* Make sure HW does not configure LCD from PHY 2442 * extended configuration before SW configuration 2443 */ 2444 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 2445 if ((hw->mac.type < e1000_pch2lan) && 2446 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) 2447 goto release; 2448 2449 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE); 2450 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 2451 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; 2452 if (!cnf_size) 2453 goto release; 2454 2455 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 2456 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 2457 2458 if (((hw->mac.type == e1000_pchlan) && 2459 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || 2460 (hw->mac.type > e1000_pchlan)) { 2461 /* HW configures the SMBus address and LEDs when the 2462 * OEM and LCD Write Enable bits are set in the NVM. 2463 * When both NVM bits are cleared, SW will configure 2464 * them instead. 2465 */ 2466 ret_val = e1000_write_smbus_addr(hw); 2467 if (ret_val) 2468 goto release; 2469 2470 data = E1000_READ_REG(hw, E1000_LEDCTL); 2471 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, 2472 (u16)data); 2473 if (ret_val) 2474 goto release; 2475 } 2476 2477 /* Configure LCD from extended configuration region. */ 2478 2479 /* cnf_base_addr is in DWORD */ 2480 word_addr = (u16)(cnf_base_addr << 1); 2481 2482 for (i = 0; i < cnf_size; i++) { 2483 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1, 2484 &reg_data); 2485 if (ret_val) 2486 goto release; 2487 2488 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1), 2489 1, &reg_addr); 2490 if (ret_val) 2491 goto release; 2492 2493 /* Save off the PHY page for future writes. */ 2494 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { 2495 phy_page = reg_data; 2496 continue; 2497 } 2498 2499 reg_addr &= PHY_REG_MASK; 2500 reg_addr |= phy_page; 2501 2502 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, 2503 reg_data); 2504 if (ret_val) 2505 goto release; 2506 } 2507 2508 release: 2509 hw->phy.ops.release(hw); 2510 return ret_val; 2511 } 2512 2513 /** 2514 * e1000_k1_gig_workaround_hv - K1 Si workaround 2515 * @hw: pointer to the HW structure 2516 * @link: link up bool flag 2517 * 2518 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning 2519 * from a lower speed. This workaround disables K1 whenever link is at 1Gig. 2520 * If link is down, the function will restore the default K1 setting located 2521 * in the NVM. 
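 * The whole flow runs under the software flag semaphore, since both the
 * PHY status reads and the K1 configuration require locked PHY access.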
2522 **/ 2523 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) 2524 { 2525 s32 ret_val = E1000_SUCCESS; 2526 u16 status_reg = 0; 2527 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; 2528 2529 DEBUGFUNC("e1000_k1_gig_workaround_hv"); 2530 2531 if (hw->mac.type != e1000_pchlan) 2532 return E1000_SUCCESS; 2533 2534 /* Wrap the whole flow with the sw flag */ 2535 ret_val = hw->phy.ops.acquire(hw); 2536 if (ret_val) 2537 return ret_val; 2538 2539 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ 2540 if (link) { 2541 if (hw->phy.type == e1000_phy_82578) { 2542 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, 2543 &status_reg); 2544 if (ret_val) 2545 goto release; 2546 2547 status_reg &= (BM_CS_STATUS_LINK_UP | 2548 BM_CS_STATUS_RESOLVED | 2549 BM_CS_STATUS_SPEED_MASK); 2550 2551 if (status_reg == (BM_CS_STATUS_LINK_UP | 2552 BM_CS_STATUS_RESOLVED | 2553 BM_CS_STATUS_SPEED_1000)) 2554 k1_enable = FALSE; 2555 } 2556 2557 if (hw->phy.type == e1000_phy_82577) { 2558 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, 2559 &status_reg); 2560 if (ret_val) 2561 goto release; 2562 2563 status_reg &= (HV_M_STATUS_LINK_UP | 2564 HV_M_STATUS_AUTONEG_COMPLETE | 2565 HV_M_STATUS_SPEED_MASK); 2566 2567 if (status_reg == (HV_M_STATUS_LINK_UP | 2568 HV_M_STATUS_AUTONEG_COMPLETE | 2569 HV_M_STATUS_SPEED_1000)) 2570 k1_enable = FALSE; 2571 } 2572 2573 /* Link stall fix for link up */ 2574 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 2575 0x0100); 2576 if (ret_val) 2577 goto release; 2578 2579 } else { 2580 /* Link stall fix for link down */ 2581 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 2582 0x4100); 2583 if (ret_val) 2584 goto release; 2585 } 2586 2587 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); 2588 2589 release: 2590 hw->phy.ops.release(hw); 2591 2592 return ret_val; 2593 } 2594 2595 /** 2596 * e1000_configure_k1_ich8lan - Configure K1 power state 2597 * @hw: pointer to the HW structure 2598 * @k1_enable: K1 state to configure 2599 * 2600 * Configure the K1 power state based on the provided parameter. 2601 * Assumes semaphore already acquired. 
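 * After the KMRN K1 bit is updated, the MAC speed is briefly forced via
 * CTRL/CTRL_EXT (with ~20 usec settling delays) so the change takes
 * effect, and the original register values are then restored.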
2602 * 2603 * Success returns 0, Failure returns -E1000_ERR_PHY (-2) 2604 **/ 2605 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) 2606 { 2607 s32 ret_val; 2608 u32 ctrl_reg = 0; 2609 u32 ctrl_ext = 0; 2610 u32 reg = 0; 2611 u16 kmrn_reg = 0; 2612 2613 DEBUGFUNC("e1000_configure_k1_ich8lan"); 2614 2615 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, 2616 &kmrn_reg); 2617 if (ret_val) 2618 return ret_val; 2619 2620 if (k1_enable) 2621 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; 2622 else 2623 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; 2624 2625 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, 2626 kmrn_reg); 2627 if (ret_val) 2628 return ret_val; 2629 2630 usec_delay(20); 2631 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2632 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); 2633 2634 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 2635 reg |= E1000_CTRL_FRCSPD; 2636 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2637 2638 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); 2639 E1000_WRITE_FLUSH(hw); 2640 usec_delay(20); 2641 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); 2642 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 2643 E1000_WRITE_FLUSH(hw); 2644 usec_delay(20); 2645 2646 return E1000_SUCCESS; 2647 } 2648 2649 /** 2650 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration 2651 * @hw: pointer to the HW structure 2652 * @d0_state: boolean if entering d0 or d3 device state 2653 * 2654 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are 2655 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit 2656 * in NVM determines whether HW should configure LPLU and Gbe Disable. 2657 **/ 2658 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) 2659 { 2660 s32 ret_val = 0; 2661 u32 mac_reg; 2662 u16 oem_reg; 2663 2664 DEBUGFUNC("e1000_oem_bits_config_ich8lan"); 2665 2666 if (hw->mac.type < e1000_pchlan) 2667 return ret_val; 2668 2669 ret_val = hw->phy.ops.acquire(hw); 2670 if (ret_val) 2671 return ret_val; 2672 2673 if (hw->mac.type == e1000_pchlan) { 2674 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 2675 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) 2676 goto release; 2677 } 2678 2679 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM); 2680 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) 2681 goto release; 2682 2683 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL); 2684 2685 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); 2686 if (ret_val) 2687 goto release; 2688 2689 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); 2690 2691 if (d0_state) { 2692 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) 2693 oem_reg |= HV_OEM_BITS_GBE_DIS; 2694 2695 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) 2696 oem_reg |= HV_OEM_BITS_LPLU; 2697 } else { 2698 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | 2699 E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) 2700 oem_reg |= HV_OEM_BITS_GBE_DIS; 2701 2702 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | 2703 E1000_PHY_CTRL_NOND0A_LPLU)) 2704 oem_reg |= HV_OEM_BITS_LPLU; 2705 } 2706 2707 /* Set Restart auto-neg to activate the bits */ 2708 if ((d0_state || (hw->mac.type != e1000_pchlan)) && 2709 !hw->phy.ops.check_reset_block(hw)) 2710 oem_reg |= HV_OEM_BITS_RESTART_AN; 2711 2712 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 2713 2714 release: 2715 hw->phy.ops.release(hw); 2716 2717 return ret_val; 2718 } 2719 2720 2721 /** 2722 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode 2723 * @hw: pointer to the HW 
structure 2724 **/ 2725 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) 2726 { 2727 s32 ret_val; 2728 u16 data; 2729 2730 DEBUGFUNC("e1000_set_mdio_slow_mode_hv"); 2731 2732 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data); 2733 if (ret_val) 2734 return ret_val; 2735 2736 data |= HV_KMRN_MDIO_SLOW; 2737 2738 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data); 2739 2740 return ret_val; 2741 } 2742 2743 /** 2744 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be 2745 * done after every PHY reset. 2746 * @hw: pointer to the HW structure 2747 **/ 2748 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) 2749 { 2750 s32 ret_val = E1000_SUCCESS; 2751 u16 phy_data; 2752 2753 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan"); 2754 2755 if (hw->mac.type != e1000_pchlan) 2756 return E1000_SUCCESS; 2757 2758 /* Set MDIO slow mode before any other MDIO access */ 2759 if (hw->phy.type == e1000_phy_82577) { 2760 ret_val = e1000_set_mdio_slow_mode_hv(hw); 2761 if (ret_val) 2762 return ret_val; 2763 } 2764 2765 if (((hw->phy.type == e1000_phy_82577) && 2766 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || 2767 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { 2768 /* Disable generation of early preamble */ 2769 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431); 2770 if (ret_val) 2771 return ret_val; 2772 2773 /* Preamble tuning for SSC */ 2774 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, 2775 0xA204); 2776 if (ret_val) 2777 return ret_val; 2778 } 2779 2780 if (hw->phy.type == e1000_phy_82578) { 2781 /* Return registers to default by doing a soft reset then 2782 * writing 0x3140 to the control register. 2783 */ 2784 if (hw->phy.revision < 2) { 2785 e1000_phy_sw_reset_generic(hw); 2786 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, 2787 0x3140); 2788 if (ret_val) 2789 return ret_val; 2790 } 2791 } 2792 2793 /* Select page 0 */ 2794 ret_val = hw->phy.ops.acquire(hw); 2795 if (ret_val) 2796 return ret_val; 2797 2798 hw->phy.addr = 1; 2799 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); 2800 hw->phy.ops.release(hw); 2801 if (ret_val) 2802 return ret_val; 2803 2804 /* Configure the K1 Si workaround during phy reset assuming there is 2805 * link so that it disables K1 if link is in 1Gbps. 
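 * (K1 is only actually disabled when the PHY status registers report a
 * resolved 1 Gbps link; otherwise the NVM default is preserved.)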
2806 */ 2807 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE); 2808 if (ret_val) 2809 return ret_val; 2810 2811 /* Workaround for link disconnects on a busy hub in half duplex */ 2812 ret_val = hw->phy.ops.acquire(hw); 2813 if (ret_val) 2814 return ret_val; 2815 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); 2816 if (ret_val) 2817 goto release; 2818 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, 2819 phy_data & 0x00FF); 2820 if (ret_val) 2821 goto release; 2822 2823 /* set MSE higher to enable link to stay up when noise is high */ 2824 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); 2825 release: 2826 hw->phy.ops.release(hw); 2827 2828 return ret_val; 2829 } 2830 2831 /** 2832 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY 2833 * @hw: pointer to the HW structure 2834 **/ 2835 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) 2836 { 2837 u32 mac_reg; 2838 u16 i, phy_reg = 0; 2839 s32 ret_val; 2840 2841 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan"); 2842 2843 ret_val = hw->phy.ops.acquire(hw); 2844 if (ret_val) 2845 return; 2846 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); 2847 if (ret_val) 2848 goto release; 2849 2850 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ 2851 for (i = 0; i < (hw->mac.rar_entry_count); i++) { 2852 mac_reg = E1000_READ_REG(hw, E1000_RAL(i)); 2853 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), 2854 (u16)(mac_reg & 0xFFFF)); 2855 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), 2856 (u16)((mac_reg >> 16) & 0xFFFF)); 2857 2858 mac_reg = E1000_READ_REG(hw, E1000_RAH(i)); 2859 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), 2860 (u16)(mac_reg & 0xFFFF)); 2861 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), 2862 (u16)((mac_reg & E1000_RAH_AV) 2863 >> 16)); 2864 } 2865 2866 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); 2867 2868 release: 2869 hw->phy.ops.release(hw); 2870 } 2871 2872 static u32 e1000_calc_rx_da_crc(u8 mac[]) 2873 { 2874 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ 2875 u32 i, j, mask, crc; 2876 2877 DEBUGFUNC("e1000_calc_rx_da_crc"); 2878 2879 crc = 0xffffffff; 2880 for (i = 0; i < 6; i++) { 2881 crc = crc ^ mac[i]; 2882 for (j = 8; j > 0; j--) { 2883 mask = (crc & 1) * (-1); 2884 crc = (crc >> 1) ^ (poly & mask); 2885 } 2886 } 2887 return ~crc; 2888 } 2889 2890 /** 2891 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation 2892 * with 82579 PHY 2893 * @hw: pointer to the HW structure 2894 * @enable: flag to enable/disable workaround when enabling/disabling jumbos 2895 **/ 2896 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) 2897 { 2898 s32 ret_val = E1000_SUCCESS; 2899 u16 phy_reg, data; 2900 u32 mac_reg; 2901 u16 i; 2902 2903 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan"); 2904 2905 if (hw->mac.type < e1000_pch2lan) 2906 return E1000_SUCCESS; 2907 2908 /* disable Rx path while enabling/disabling workaround */ 2909 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg); 2910 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), 2911 phy_reg | (1 << 14)); 2912 if (ret_val) 2913 return ret_val; 2914 2915 if (enable) { 2916 /* Write Rx addresses (rar_entry_count for RAL/H, and 2917 * SHRAL/H) and initial CRC values to the MAC 2918 */ 2919 for (i = 0; i < hw->mac.rar_entry_count; i++) { 2920 u8 mac_addr[ETHER_ADDR_LEN] = {0}; 2921 u32 addr_high, addr_low; 2922 2923 addr_high = E1000_READ_REG(hw, E1000_RAH(i)); 2924 if (!(addr_high & E1000_RAH_AV)) 2925 continue; 2926 addr_low = 
E1000_READ_REG(hw, E1000_RAL(i)); 2927 mac_addr[0] = (addr_low & 0xFF); 2928 mac_addr[1] = ((addr_low >> 8) & 0xFF); 2929 mac_addr[2] = ((addr_low >> 16) & 0xFF); 2930 mac_addr[3] = ((addr_low >> 24) & 0xFF); 2931 mac_addr[4] = (addr_high & 0xFF); 2932 mac_addr[5] = ((addr_high >> 8) & 0xFF); 2933 2934 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), 2935 e1000_calc_rx_da_crc(mac_addr)); 2936 } 2937 2938 /* Write Rx addresses to the PHY */ 2939 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 2940 2941 /* Enable jumbo frame workaround in the MAC */ 2942 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); 2943 mac_reg &= ~(1 << 14); 2944 mac_reg |= (7 << 15); 2945 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); 2946 2947 mac_reg = E1000_READ_REG(hw, E1000_RCTL); 2948 mac_reg |= E1000_RCTL_SECRC; 2949 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); 2950 2951 ret_val = e1000_read_kmrn_reg_generic(hw, 2952 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2953 &data); 2954 if (ret_val) 2955 return ret_val; 2956 ret_val = e1000_write_kmrn_reg_generic(hw, 2957 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2958 data | (1 << 0)); 2959 if (ret_val) 2960 return ret_val; 2961 ret_val = e1000_read_kmrn_reg_generic(hw, 2962 E1000_KMRNCTRLSTA_HD_CTRL, 2963 &data); 2964 if (ret_val) 2965 return ret_val; 2966 data &= ~(0xF << 8); 2967 data |= (0xB << 8); 2968 ret_val = e1000_write_kmrn_reg_generic(hw, 2969 E1000_KMRNCTRLSTA_HD_CTRL, 2970 data); 2971 if (ret_val) 2972 return ret_val; 2973 2974 /* Enable jumbo frame workaround in the PHY */ 2975 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); 2976 data &= ~(0x7F << 5); 2977 data |= (0x37 << 5); 2978 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); 2979 if (ret_val) 2980 return ret_val; 2981 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); 2982 data &= ~(1 << 13); 2983 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); 2984 if (ret_val) 2985 return ret_val; 2986 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); 2987 data &= ~(0x3FF << 2); 2988 data |= (E1000_TX_PTR_GAP << 2); 2989 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); 2990 if (ret_val) 2991 return ret_val; 2992 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100); 2993 if (ret_val) 2994 return ret_val; 2995 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); 2996 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | 2997 (1 << 10)); 2998 if (ret_val) 2999 return ret_val; 3000 } else { 3001 /* Write MAC register values back to h/w defaults */ 3002 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); 3003 mac_reg &= ~(0xF << 14); 3004 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); 3005 3006 mac_reg = E1000_READ_REG(hw, E1000_RCTL); 3007 mac_reg &= ~E1000_RCTL_SECRC; 3008 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); 3009 3010 ret_val = e1000_read_kmrn_reg_generic(hw, 3011 E1000_KMRNCTRLSTA_CTRL_OFFSET, 3012 &data); 3013 if (ret_val) 3014 return ret_val; 3015 ret_val = e1000_write_kmrn_reg_generic(hw, 3016 E1000_KMRNCTRLSTA_CTRL_OFFSET, 3017 data & ~(1 << 0)); 3018 if (ret_val) 3019 return ret_val; 3020 ret_val = e1000_read_kmrn_reg_generic(hw, 3021 E1000_KMRNCTRLSTA_HD_CTRL, 3022 &data); 3023 if (ret_val) 3024 return ret_val; 3025 data &= ~(0xF << 8); 3026 data |= (0xB << 8); 3027 ret_val = e1000_write_kmrn_reg_generic(hw, 3028 E1000_KMRNCTRLSTA_HD_CTRL, 3029 data); 3030 if (ret_val) 3031 return ret_val; 3032 3033 /* Write PHY register values back to h/w defaults */ 3034 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); 3035 data &= ~(0x7F << 5); 3036 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); 3037 if (ret_val) 
3038 return ret_val; 3039 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); 3040 data |= (1 << 13); 3041 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); 3042 if (ret_val) 3043 return ret_val; 3044 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); 3045 data &= ~(0x3FF << 2); 3046 data |= (0x8 << 2); 3047 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); 3048 if (ret_val) 3049 return ret_val; 3050 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00); 3051 if (ret_val) 3052 return ret_val; 3053 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); 3054 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & 3055 ~(1 << 10)); 3056 if (ret_val) 3057 return ret_val; 3058 } 3059 3060 /* re-enable Rx path after enabling/disabling workaround */ 3061 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & 3062 ~(1 << 14)); 3063 } 3064 3065 /** 3066 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be 3067 * done after every PHY reset. 3068 * @hw: pointer to the HW structure 3069 **/ 3070 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) 3071 { 3072 s32 ret_val = E1000_SUCCESS; 3073 3074 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan"); 3075 3076 if (hw->mac.type != e1000_pch2lan) 3077 return E1000_SUCCESS; 3078 3079 /* Set MDIO slow mode before any other MDIO access */ 3080 ret_val = e1000_set_mdio_slow_mode_hv(hw); 3081 if (ret_val) 3082 return ret_val; 3083 3084 ret_val = hw->phy.ops.acquire(hw); 3085 if (ret_val) 3086 return ret_val; 3087 /* set MSE higher to enable link to stay up when noise is high */ 3088 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); 3089 if (ret_val) 3090 goto release; 3091 /* drop link after the MSE threshold has been reached 5 times */ 3092 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); 3093 release: 3094 hw->phy.ops.release(hw); 3095 3096 return ret_val; 3097 } 3098 3099 /** 3100 * e1000_k1_workaround_lv - K1 Si workaround 3101 * @hw: pointer to the HW structure 3102 * 3103 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps. 3104 * Disable K1 for 1000 and 100 speeds. 3105 **/ 3106 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 3107 { 3108 s32 ret_val = E1000_SUCCESS; 3109 u16 status_reg = 0; 3110 3111 DEBUGFUNC("e1000_k1_workaround_lv"); 3112 3113 if (hw->mac.type != e1000_pch2lan) 3114 return E1000_SUCCESS; 3115 3116 /* Set K1 beacon duration based on 10Mbps speed */ 3117 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg); 3118 if (ret_val) 3119 return ret_val; 3120 3121 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 3122 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 3123 if (status_reg & 3124 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { 3125 u16 pm_phy_reg; 3126 3127 /* LV 1G/100 packet drop issue workaround */ 3128 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL, 3129 &pm_phy_reg); 3130 if (ret_val) 3131 return ret_val; 3132 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; 3133 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, 3134 pm_phy_reg); 3135 if (ret_val) 3136 return ret_val; 3137 } else { 3138 u32 mac_reg; 3139 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); 3140 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 3141 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 3142 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); 3143 } 3144 } 3145 3146 return ret_val; 3147 } 3148 3149 /** 3150 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware 3151 * @hw: pointer to the HW structure 3152 * @gate: 
boolean set to TRUE to gate, FALSE to ungate 3153 * 3154 * Gate/ungate the automatic PHY configuration via hardware; perform 3155 * the configuration via software instead. 3156 **/ 3157 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) 3158 { 3159 u32 extcnf_ctrl; 3160 3161 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan"); 3162 3163 if (hw->mac.type < e1000_pch2lan) 3164 return; 3165 3166 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 3167 3168 if (gate) 3169 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; 3170 else 3171 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; 3172 3173 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 3174 } 3175 3176 /** 3177 * e1000_lan_init_done_ich8lan - Check for PHY config completion 3178 * @hw: pointer to the HW structure 3179 * 3180 * Check for the appropriate indication that the MAC has finished configuring 3181 * the PHY after a software reset. 3182 **/ 3183 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) 3184 { 3185 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; 3186 3187 DEBUGFUNC("e1000_lan_init_done_ich8lan"); 3188 3189 /* Wait for basic configuration to complete before proceeding */ 3190 do { 3191 data = E1000_READ_REG(hw, E1000_STATUS); 3192 data &= E1000_STATUS_LAN_INIT_DONE; 3193 usec_delay(100); 3194 } while ((!data) && --loop); 3195 3196 /* If basic configuration is incomplete before the above loop 3197 * count reaches 0, loading the configuration from NVM will 3198 * leave the PHY in a bad state possibly resulting in no link. 3199 */ 3200 if (loop == 0) 3201 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n"); 3202 3203 /* Clear the Init Done bit for the next init event */ 3204 data = E1000_READ_REG(hw, E1000_STATUS); 3205 data &= ~E1000_STATUS_LAN_INIT_DONE; 3206 E1000_WRITE_REG(hw, E1000_STATUS, data); 3207 } 3208 3209 /** 3210 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset 3211 * @hw: pointer to the HW structure 3212 **/ 3213 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) 3214 { 3215 s32 ret_val = E1000_SUCCESS; 3216 u16 reg; 3217 3218 DEBUGFUNC("e1000_post_phy_reset_ich8lan"); 3219 3220 if (hw->phy.ops.check_reset_block(hw)) 3221 return E1000_SUCCESS; 3222 3223 /* Allow time for h/w to get to quiescent state after reset */ 3224 msec_delay(10); 3225 3226 /* Perform any necessary post-reset workarounds */ 3227 switch (hw->mac.type) { 3228 case e1000_pchlan: 3229 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 3230 if (ret_val) 3231 return ret_val; 3232 break; 3233 case e1000_pch2lan: 3234 ret_val = e1000_lv_phy_workarounds_ich8lan(hw); 3235 if (ret_val) 3236 return ret_val; 3237 break; 3238 default: 3239 break; 3240 } 3241 3242 /* Clear the host wakeup bit after lcd reset */ 3243 if (hw->mac.type >= e1000_pchlan) { 3244 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg); 3245 reg &= ~BM_WUC_HOST_WU_BIT; 3246 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg); 3247 } 3248 3249 /* Configure the LCD with the extended configuration region in NVM */ 3250 ret_val = e1000_sw_lcd_config_ich8lan(hw); 3251 if (ret_val) 3252 return ret_val; 3253 3254 /* Configure the LCD with the OEM bits in NVM */ 3255 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE); 3256 3257 if (hw->mac.type == e1000_pch2lan) { 3258 /* Ungate automatic PHY configuration on non-managed 82579 */ 3259 if (!(E1000_READ_REG(hw, E1000_FWSM) & 3260 E1000_ICH_FWSM_FW_VALID)) { 3261 msec_delay(10); 3262 e1000_gate_hw_phy_config_ich8lan(hw, FALSE); 3263 } 3264 3265 /* Set EEE LPI Update Timer to 200usec */ 3266 ret_val = 
hw->phy.ops.acquire(hw); 3267 if (ret_val) 3268 return ret_val; 3269 ret_val = e1000_write_emi_reg_locked(hw, 3270 I82579_LPI_UPDATE_TIMER, 3271 0x1387); 3272 hw->phy.ops.release(hw); 3273 } 3274 3275 return ret_val; 3276 } 3277 3278 /** 3279 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset 3280 * @hw: pointer to the HW structure 3281 * 3282 * Resets the PHY. 3283 * This is a function pointer entry point called by drivers 3284 * or other shared routines. 3285 **/ 3286 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 3287 { 3288 s32 ret_val = E1000_SUCCESS; 3289 3290 DEBUGFUNC("e1000_phy_hw_reset_ich8lan"); 3291 3292 /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 3293 if ((hw->mac.type == e1000_pch2lan) && 3294 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) 3295 e1000_gate_hw_phy_config_ich8lan(hw, TRUE); 3296 3297 ret_val = e1000_phy_hw_reset_generic(hw); 3298 if (ret_val) 3299 return ret_val; 3300 3301 return e1000_post_phy_reset_ich8lan(hw); 3302 } 3303 3304 /** 3305 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state 3306 * @hw: pointer to the HW structure 3307 * @active: TRUE to enable LPLU, FALSE to disable 3308 * 3309 * Sets the LPLU state according to the active flag. For PCH, if the OEM write 3310 * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set 3311 * the PHY speed. This function will manually set the LPLU bit and restart 3312 * auto-neg as hw would do. D3 and D0 LPLU will call the same function 3313 * since it configures the same bit. 3314 **/ 3315 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) 3316 { 3317 s32 ret_val; 3318 u16 oem_reg; 3319 3320 DEBUGFUNC("e1000_set_lplu_state_pchlan"); 3321 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg); 3322 if (ret_val) 3323 return ret_val; 3324 3325 if (active) 3326 oem_reg |= HV_OEM_BITS_LPLU; 3327 else 3328 oem_reg &= ~HV_OEM_BITS_LPLU; 3329 3330 if (!hw->phy.ops.check_reset_block(hw)) 3331 oem_reg |= HV_OEM_BITS_RESTART_AN; 3332 3333 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg); 3334 } 3335 3336 /** 3337 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state 3338 * @hw: pointer to the HW structure 3339 * @active: TRUE to enable LPLU, FALSE to disable 3340 * 3341 * Sets the LPLU D0 state according to the active flag. When 3342 * activating LPLU this function also disables smart speed 3343 * and vice versa. LPLU will not be activated unless the 3344 * device autonegotiation advertisement meets standards of 3345 * either 10 or 10/100 or 10/100/1000 at all duplexes. 3346 * This is a function pointer entry point only called by 3347 * PHY setup routines. 
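 * The function is a no-op for IFE PHYs, and the SmartSpeed adjustments
 * below apply only to igp_3 PHYs.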
3348 **/ 3349 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 3350 { 3351 struct e1000_phy_info *phy = &hw->phy; 3352 u32 phy_ctrl; 3353 s32 ret_val = E1000_SUCCESS; 3354 u16 data; 3355 3356 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); 3357 3358 if (phy->type == e1000_phy_ife) 3359 return E1000_SUCCESS; 3360 3361 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 3362 3363 if (active) { 3364 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 3365 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 3366 3367 if (phy->type != e1000_phy_igp_3) 3368 return E1000_SUCCESS; 3369 3370 /* Call gig speed drop workaround on LPLU before accessing 3371 * any PHY registers 3372 */ 3373 if (hw->mac.type == e1000_ich8lan) 3374 e1000_gig_downshift_workaround_ich8lan(hw); 3375 3376 /* When LPLU is enabled, we should disable SmartSpeed */ 3377 ret_val = phy->ops.read_reg(hw, 3378 IGP01E1000_PHY_PORT_CONFIG, 3379 &data); 3380 if (ret_val) 3381 return ret_val; 3382 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 3383 ret_val = phy->ops.write_reg(hw, 3384 IGP01E1000_PHY_PORT_CONFIG, 3385 data); 3386 if (ret_val) 3387 return ret_val; 3388 } else { 3389 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 3390 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 3391 3392 if (phy->type != e1000_phy_igp_3) 3393 return E1000_SUCCESS; 3394 3395 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 3396 * during Dx states where the power conservation is most 3397 * important. During driver activity we should enable 3398 * SmartSpeed, so performance is maintained. 3399 */ 3400 if (phy->smart_speed == e1000_smart_speed_on) { 3401 ret_val = phy->ops.read_reg(hw, 3402 IGP01E1000_PHY_PORT_CONFIG, 3403 &data); 3404 if (ret_val) 3405 return ret_val; 3406 3407 data |= IGP01E1000_PSCFR_SMART_SPEED; 3408 ret_val = phy->ops.write_reg(hw, 3409 IGP01E1000_PHY_PORT_CONFIG, 3410 data); 3411 if (ret_val) 3412 return ret_val; 3413 } else if (phy->smart_speed == e1000_smart_speed_off) { 3414 ret_val = phy->ops.read_reg(hw, 3415 IGP01E1000_PHY_PORT_CONFIG, 3416 &data); 3417 if (ret_val) 3418 return ret_val; 3419 3420 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 3421 ret_val = phy->ops.write_reg(hw, 3422 IGP01E1000_PHY_PORT_CONFIG, 3423 data); 3424 if (ret_val) 3425 return ret_val; 3426 } 3427 } 3428 3429 return E1000_SUCCESS; 3430 } 3431 3432 /** 3433 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state 3434 * @hw: pointer to the HW structure 3435 * @active: TRUE to enable LPLU, FALSE to disable 3436 * 3437 * Sets the LPLU D3 state according to the active flag. When 3438 * activating LPLU this function also disables smart speed 3439 * and vice versa. LPLU will not be activated unless the 3440 * device autonegotiation advertisement meets standards of 3441 * either 10 or 10/100 or 10/100/1000 at all duplexes. 3442 * This is a function pointer entry point only called by 3443 * PHY setup routines. 3444 **/ 3445 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) 3446 { 3447 struct e1000_phy_info *phy = &hw->phy; 3448 u32 phy_ctrl; 3449 s32 ret_val = E1000_SUCCESS; 3450 u16 data; 3451 3452 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); 3453 3454 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 3455 3456 if (!active) { 3457 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 3458 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 3459 3460 if (phy->type != e1000_phy_igp_3) 3461 return E1000_SUCCESS; 3462 3463 /* LPLU and SmartSpeed are mutually exclusive. 
LPLU is used 3464 * during Dx states where the power conservation is most 3465 * important. During driver activity we should enable 3466 * SmartSpeed, so performance is maintained. 3467 */ 3468 if (phy->smart_speed == e1000_smart_speed_on) { 3469 ret_val = phy->ops.read_reg(hw, 3470 IGP01E1000_PHY_PORT_CONFIG, 3471 &data); 3472 if (ret_val) 3473 return ret_val; 3474 3475 data |= IGP01E1000_PSCFR_SMART_SPEED; 3476 ret_val = phy->ops.write_reg(hw, 3477 IGP01E1000_PHY_PORT_CONFIG, 3478 data); 3479 if (ret_val) 3480 return ret_val; 3481 } else if (phy->smart_speed == e1000_smart_speed_off) { 3482 ret_val = phy->ops.read_reg(hw, 3483 IGP01E1000_PHY_PORT_CONFIG, 3484 &data); 3485 if (ret_val) 3486 return ret_val; 3487 3488 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 3489 ret_val = phy->ops.write_reg(hw, 3490 IGP01E1000_PHY_PORT_CONFIG, 3491 data); 3492 if (ret_val) 3493 return ret_val; 3494 } 3495 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || 3496 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 3497 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 3498 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 3499 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 3500 3501 if (phy->type != e1000_phy_igp_3) 3502 return E1000_SUCCESS; 3503 3504 /* Call gig speed drop workaround on LPLU before accessing 3505 * any PHY registers 3506 */ 3507 if (hw->mac.type == e1000_ich8lan) 3508 e1000_gig_downshift_workaround_ich8lan(hw); 3509 3510 /* When LPLU is enabled, we should disable SmartSpeed */ 3511 ret_val = phy->ops.read_reg(hw, 3512 IGP01E1000_PHY_PORT_CONFIG, 3513 &data); 3514 if (ret_val) 3515 return ret_val; 3516 3517 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 3518 ret_val = phy->ops.write_reg(hw, 3519 IGP01E1000_PHY_PORT_CONFIG, 3520 data); 3521 } 3522 3523 return ret_val; 3524 } 3525 3526 /** 3527 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 3528 * @hw: pointer to the HW structure 3529 * @bank: pointer to the variable that returns the active bank 3530 * 3531 * Reads signature byte from the NVM using the flash access registers. 3532 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
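 * Concretely, a bank is considered valid when
 * (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE,
 * i.e. the upper two bits of the signature byte read back as 10b.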
3533 **/ 3534 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) 3535 { 3536 u32 eecd; 3537 struct e1000_nvm_info *nvm = &hw->nvm; 3538 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); 3539 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; 3540 u32 nvm_dword = 0; 3541 u8 sig_byte = 0; 3542 s32 ret_val; 3543 3544 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan"); 3545 3546 switch (hw->mac.type) { 3547 case e1000_pch_spt: 3548 case e1000_pch_cnp: 3549 case e1000_pch_tgp: 3550 case e1000_pch_adp: 3551 case e1000_pch_mtp: 3552 bank1_offset = nvm->flash_bank_size; 3553 act_offset = E1000_ICH_NVM_SIG_WORD; 3554 3555 /* set bank to 0 in case flash read fails */ 3556 *bank = 0; 3557 3558 /* Check bank 0 */ 3559 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, 3560 &nvm_dword); 3561 if (ret_val) 3562 return ret_val; 3563 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); 3564 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3565 E1000_ICH_NVM_SIG_VALUE) { 3566 *bank = 0; 3567 return E1000_SUCCESS; 3568 } 3569 3570 /* Check bank 1 */ 3571 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + 3572 bank1_offset, 3573 &nvm_dword); 3574 if (ret_val) 3575 return ret_val; 3576 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); 3577 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3578 E1000_ICH_NVM_SIG_VALUE) { 3579 *bank = 1; 3580 return E1000_SUCCESS; 3581 } 3582 3583 DEBUGOUT("ERROR: No valid NVM bank present\n"); 3584 return -E1000_ERR_NVM; 3585 case e1000_ich8lan: 3586 case e1000_ich9lan: 3587 eecd = E1000_READ_REG(hw, E1000_EECD); 3588 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == 3589 E1000_EECD_SEC1VAL_VALID_MASK) { 3590 if (eecd & E1000_EECD_SEC1VAL) 3591 *bank = 1; 3592 else 3593 *bank = 0; 3594 3595 return E1000_SUCCESS; 3596 } 3597 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n"); 3598 /* fall-thru */ 3599 default: 3600 /* set bank to 0 in case flash read fails */ 3601 *bank = 0; 3602 3603 /* Check bank 0 */ 3604 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, 3605 &sig_byte); 3606 if (ret_val) 3607 return ret_val; 3608 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3609 E1000_ICH_NVM_SIG_VALUE) { 3610 *bank = 0; 3611 return E1000_SUCCESS; 3612 } 3613 3614 /* Check bank 1 */ 3615 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + 3616 bank1_offset, 3617 &sig_byte); 3618 if (ret_val) 3619 return ret_val; 3620 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3621 E1000_ICH_NVM_SIG_VALUE) { 3622 *bank = 1; 3623 return E1000_SUCCESS; 3624 } 3625 3626 DEBUGOUT("ERROR: No valid NVM bank present\n"); 3627 return -E1000_ERR_NVM; 3628 } 3629 } 3630 3631 /** 3632 * e1000_read_nvm_spt - NVM access for SPT 3633 * @hw: pointer to the HW structure 3634 * @offset: The offset (in bytes) of the word(s) to read. 3635 * @words: Size of data to read in words. 3636 * @data: pointer to the word(s) to read at offset. 
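 * (On SPT and later parts the flash is only dword-addressable, so the loop
 * below fetches the dword containing each requested word and selects the
 * relevant half.)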
3637 * 3638 * Reads a word(s) from the NVM 3639 **/ 3640 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, 3641 u16 *data) 3642 { 3643 struct e1000_nvm_info *nvm = &hw->nvm; 3644 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3645 u32 act_offset; 3646 s32 ret_val = E1000_SUCCESS; 3647 u32 bank = 0; 3648 u32 dword = 0; 3649 u16 offset_to_read; 3650 u16 i; 3651 3652 DEBUGFUNC("e1000_read_nvm_spt"); 3653 3654 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 3655 (words == 0)) { 3656 DEBUGOUT("nvm parameter(s) out of bounds\n"); 3657 ret_val = -E1000_ERR_NVM; 3658 goto out; 3659 } 3660 3661 nvm->ops.acquire(hw); 3662 3663 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 3664 if (ret_val != E1000_SUCCESS) { 3665 DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); 3666 bank = 0; 3667 } 3668 3669 act_offset = (bank) ? nvm->flash_bank_size : 0; 3670 act_offset += offset; 3671 3672 ret_val = E1000_SUCCESS; 3673 3674 for (i = 0; i < words; i += 2) { 3675 if (words - i == 1) { 3676 if (dev_spec->shadow_ram[offset+i].modified) { 3677 data[i] = dev_spec->shadow_ram[offset+i].value; 3678 } else { 3679 offset_to_read = act_offset + i - 3680 ((act_offset + i) % 2); 3681 ret_val = 3682 e1000_read_flash_dword_ich8lan(hw, 3683 offset_to_read, 3684 &dword); 3685 if (ret_val) 3686 break; 3687 if ((act_offset + i) % 2 == 0) 3688 data[i] = (u16)(dword & 0xFFFF); 3689 else 3690 data[i] = (u16)((dword >> 16) & 0xFFFF); 3691 } 3692 } else { 3693 offset_to_read = act_offset + i; 3694 if (!(dev_spec->shadow_ram[offset+i].modified) || 3695 !(dev_spec->shadow_ram[offset+i+1].modified)) { 3696 ret_val = 3697 e1000_read_flash_dword_ich8lan(hw, 3698 offset_to_read, 3699 &dword); 3700 if (ret_val) 3701 break; 3702 } 3703 if (dev_spec->shadow_ram[offset+i].modified) 3704 data[i] = dev_spec->shadow_ram[offset+i].value; 3705 else 3706 data[i] = (u16) (dword & 0xFFFF); 3707 if (dev_spec->shadow_ram[offset+i].modified) 3708 data[i+1] = 3709 dev_spec->shadow_ram[offset+i+1].value; 3710 else 3711 data[i+1] = (u16) (dword >> 16 & 0xFFFF); 3712 } 3713 } 3714 3715 nvm->ops.release(hw); 3716 3717 out: 3718 if (ret_val) 3719 DEBUGOUT1("NVM read error: %d\n", ret_val); 3720 3721 return ret_val; 3722 } 3723 3724 /** 3725 * e1000_read_nvm_ich8lan - Read word(s) from the NVM 3726 * @hw: pointer to the HW structure 3727 * @offset: The offset (in bytes) of the word(s) to read. 3728 * @words: Size of data to read in words 3729 * @data: Pointer to the word(s) to read at offset. 3730 * 3731 * Reads a word(s) from the NVM using the flash access registers. 3732 **/ 3733 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, 3734 u16 *data) 3735 { 3736 struct e1000_nvm_info *nvm = &hw->nvm; 3737 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3738 u32 act_offset; 3739 s32 ret_val = E1000_SUCCESS; 3740 u32 bank = 0; 3741 u16 i, word; 3742 3743 DEBUGFUNC("e1000_read_nvm_ich8lan"); 3744 3745 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 3746 (words == 0)) { 3747 DEBUGOUT("nvm parameter(s) out of bounds\n"); 3748 ret_val = -E1000_ERR_NVM; 3749 goto out; 3750 } 3751 3752 nvm->ops.acquire(hw); 3753 3754 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 3755 if (ret_val != E1000_SUCCESS) { 3756 DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); 3757 bank = 0; 3758 } 3759 3760 act_offset = (bank) ? 
nvm->flash_bank_size : 0; 3761 act_offset += offset; 3762 3763 ret_val = E1000_SUCCESS; 3764 for (i = 0; i < words; i++) { 3765 if (dev_spec->shadow_ram[offset+i].modified) { 3766 data[i] = dev_spec->shadow_ram[offset+i].value; 3767 } else { 3768 ret_val = e1000_read_flash_word_ich8lan(hw, 3769 act_offset + i, 3770 &word); 3771 if (ret_val) 3772 break; 3773 data[i] = word; 3774 } 3775 } 3776 3777 nvm->ops.release(hw); 3778 3779 out: 3780 if (ret_val) 3781 DEBUGOUT1("NVM read error: %d\n", ret_val); 3782 3783 return ret_val; 3784 } 3785 3786 /** 3787 * e1000_flash_cycle_init_ich8lan - Initialize flash 3788 * @hw: pointer to the HW structure 3789 * 3790 * This function does initial flash setup so that a new read/write/erase cycle 3791 * can be started. 3792 **/ 3793 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) 3794 { 3795 union ich8_hws_flash_status hsfsts; 3796 s32 ret_val = -E1000_ERR_NVM; 3797 3798 DEBUGFUNC("e1000_flash_cycle_init_ich8lan"); 3799 3800 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 3801 3802 /* Check if the flash descriptor is valid */ 3803 if (!hsfsts.hsf_status.fldesvalid) { 3804 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n"); 3805 return -E1000_ERR_NVM; 3806 } 3807 3808 /* Clear FCERR and DAEL in hw status by writing 1 */ 3809 hsfsts.hsf_status.flcerr = 1; 3810 hsfsts.hsf_status.dael = 1; 3811 if (hw->mac.type >= e1000_pch_spt) 3812 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, 3813 hsfsts.regval & 0xFFFF); 3814 else 3815 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); 3816 3817 /* Either we should have a hardware SPI cycle in progress 3818 * bit to check against, in order to start a new cycle or 3819 * FDONE bit should be changed in the hardware so that it 3820 * is 1 after hardware reset, which can then be used as an 3821 * indication whether a cycle is in progress or has been 3822 * completed. 3823 */ 3824 3825 if (!hsfsts.hsf_status.flcinprog) { 3826 /* There is no cycle running at present, 3827 * so we can start a cycle. 3828 * Begin by setting Flash Cycle Done. 3829 */ 3830 hsfsts.hsf_status.flcdone = 1; 3831 if (hw->mac.type >= e1000_pch_spt) 3832 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, 3833 hsfsts.regval & 0xFFFF); 3834 else 3835 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, 3836 hsfsts.regval); 3837 ret_val = E1000_SUCCESS; 3838 } else { 3839 s32 i; 3840 3841 /* Otherwise poll for sometime so the current 3842 * cycle has a chance to end before giving up. 3843 */ 3844 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 3845 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 3846 ICH_FLASH_HSFSTS); 3847 if (!hsfsts.hsf_status.flcinprog) { 3848 ret_val = E1000_SUCCESS; 3849 break; 3850 } 3851 usec_delay(1); 3852 } 3853 if (ret_val == E1000_SUCCESS) { 3854 /* Successful in waiting for previous cycle to timeout, 3855 * now set the Flash Cycle Done. 3856 */ 3857 hsfsts.hsf_status.flcdone = 1; 3858 if (hw->mac.type >= e1000_pch_spt) 3859 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, 3860 hsfsts.regval & 0xFFFF); 3861 else 3862 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, 3863 hsfsts.regval); 3864 } else { 3865 DEBUGOUT("Flash controller busy, cannot get access\n"); 3866 } 3867 } 3868 3869 return ret_val; 3870 } 3871 3872 /** 3873 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) 3874 * @hw: pointer to the HW structure 3875 * @timeout: maximum time to wait for completion 3876 * 3877 * This function starts a flash cycle and waits for its completion. 
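 *
 * Illustrative call sequence (a sketch of how the helpers below drive
 * this routine, not new driver logic): a caller first runs
 * e1000_flash_cycle_init_ich8lan(hw), programs HSFCTL/FADDR (and FDATA0
 * for writes), then starts the cycle, e.g.
 *
 *	ret_val = e1000_flash_cycle_ich8lan(hw,
 *					    ICH_FLASH_READ_COMMAND_TIMEOUT);
 *
 * where @timeout bounds the number of 1-usec polls of the FLCDONE bit.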
3878 **/ 3879 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) 3880 { 3881 union ich8_hws_flash_ctrl hsflctl; 3882 union ich8_hws_flash_status hsfsts; 3883 u32 i = 0; 3884 3885 DEBUGFUNC("e1000_flash_cycle_ich8lan"); 3886 3887 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 3888 if (hw->mac.type >= e1000_pch_spt) 3889 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; 3890 else 3891 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 3892 hsflctl.hsf_ctrl.flcgo = 1; 3893 3894 if (hw->mac.type >= e1000_pch_spt) 3895 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, 3896 hsflctl.regval << 16); 3897 else 3898 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 3899 3900 /* wait till FDONE bit is set to 1 */ 3901 do { 3902 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 3903 if (hsfsts.hsf_status.flcdone) 3904 break; 3905 usec_delay(1); 3906 } while (i++ < timeout); 3907 3908 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) 3909 return E1000_SUCCESS; 3910 3911 return -E1000_ERR_NVM; 3912 } 3913 3914 /** 3915 * e1000_read_flash_dword_ich8lan - Read dword from flash 3916 * @hw: pointer to the HW structure 3917 * @offset: offset to data location 3918 * @data: pointer to the location for storing the data 3919 * 3920 * Reads the flash dword at offset into data. Offset is converted 3921 * to bytes before read. 3922 **/ 3923 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, 3924 u32 *data) 3925 { 3926 DEBUGFUNC("e1000_read_flash_dword_ich8lan"); 3927 3928 if (!data) 3929 return -E1000_ERR_NVM; 3930 3931 /* Must convert word offset into bytes. */ 3932 offset <<= 1; 3933 3934 return e1000_read_flash_data32_ich8lan(hw, offset, data); 3935 } 3936 3937 /** 3938 * e1000_read_flash_word_ich8lan - Read word from flash 3939 * @hw: pointer to the HW structure 3940 * @offset: offset to data location 3941 * @data: pointer to the location for storing the data 3942 * 3943 * Reads the flash word at offset into data. Offset is converted 3944 * to bytes before read. 3945 **/ 3946 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, 3947 u16 *data) 3948 { 3949 DEBUGFUNC("e1000_read_flash_word_ich8lan"); 3950 3951 if (!data) 3952 return -E1000_ERR_NVM; 3953 3954 /* Must convert offset into bytes. */ 3955 offset <<= 1; 3956 3957 return e1000_read_flash_data_ich8lan(hw, offset, 2, data); 3958 } 3959 3960 /** 3961 * e1000_read_flash_byte_ich8lan - Read byte from flash 3962 * @hw: pointer to the HW structure 3963 * @offset: The offset of the byte to read. 3964 * @data: Pointer to a byte to store the value read. 3965 * 3966 * Reads a single byte from the NVM using the flash access registers. 3967 **/ 3968 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 3969 u8 *data) 3970 { 3971 s32 ret_val; 3972 u16 word = 0; 3973 3974 /* In SPT, only 32 bits access is supported, 3975 * so this function should not be called. 3976 */ 3977 if (hw->mac.type >= e1000_pch_spt) 3978 return -E1000_ERR_NVM; 3979 else 3980 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); 3981 3982 if (ret_val) 3983 return ret_val; 3984 3985 *data = (u8)word; 3986 3987 return E1000_SUCCESS; 3988 } 3989 3990 /** 3991 * e1000_read_flash_data_ich8lan - Read byte or word from NVM 3992 * @hw: pointer to the HW structure 3993 * @offset: The offset (in bytes) of the byte or word to read. 
3994 * @size: Size of data to read, 1=byte 2=word
3995 * @data: Pointer to the word to store the value read.
3996 *
3997 * Reads a byte or word from the NVM using the flash access registers.
3998 **/
3999 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4000 u8 size, u16 *data)
4001 {
4002 union ich8_hws_flash_status hsfsts;
4003 union ich8_hws_flash_ctrl hsflctl;
4004 u32 flash_linear_addr;
4005 u32 flash_data = 0;
4006 s32 ret_val = -E1000_ERR_NVM;
4007 u8 count = 0;
4008
4009 DEBUGFUNC("e1000_read_flash_data_ich8lan");
4010
4011 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4012 return -E1000_ERR_NVM;
4013 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4014 hw->nvm.flash_base_addr);
4015
4016 do {
4017 usec_delay(1);
4018 /* Steps */
4019 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4020 if (ret_val != E1000_SUCCESS)
4021 break;
4022 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4023
4024 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4025 hsflctl.hsf_ctrl.fldbcount = size - 1;
4026 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
4027 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
4028 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4029
4030 ret_val = e1000_flash_cycle_ich8lan(hw,
4031 ICH_FLASH_READ_COMMAND_TIMEOUT);
4032
4033 /* Check if FCERR is set to 1, if set to 1, clear it
4034 * and try the whole sequence a few more times, else
4035 * read in (shift in) the Flash Data0, the order is
4036 * least significant byte first msb to lsb
4037 */
4038 if (ret_val == E1000_SUCCESS) {
4039 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
4040 if (size == 1)
4041 *data = (u8)(flash_data & 0x000000FF);
4042 else if (size == 2)
4043 *data = (u16)(flash_data & 0x0000FFFF);
4044 break;
4045 } else {
4046 /* If we've gotten here, then things are probably
4047 * completely hosed, but if the error condition is
4048 * detected, it won't hurt to give it another try...
4049 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
4050 */
4051 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4052 ICH_FLASH_HSFSTS);
4053 if (hsfsts.hsf_status.flcerr) {
4054 /* Repeat for some time before giving up. */
4055 continue;
4056 } else if (!hsfsts.hsf_status.flcdone) {
4057 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4058 break;
4059 }
4060 }
4061 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4062
4063 return ret_val;
4064 }
4065
4066 /**
4067 * e1000_read_flash_data32_ich8lan - Read dword from NVM
4068 * @hw: pointer to the HW structure
4069 * @offset: The offset (in bytes) of the dword to read.
4070 * @data: Pointer to the dword to store the value read.
4071 *
4072 * Reads a dword from the NVM using the flash access registers.
4073 **/
4074 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4075 u32 *data)
4076 {
4077 union ich8_hws_flash_status hsfsts;
4078 union ich8_hws_flash_ctrl hsflctl;
4079 u32 flash_linear_addr;
4080 s32 ret_val = -E1000_ERR_NVM;
4081 u8 count = 0;
4082
4083 DEBUGFUNC("e1000_read_flash_data32_ich8lan");
4084
4085 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
4086 hw->mac.type < e1000_pch_spt)
4087 return -E1000_ERR_NVM;
4088 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4089 hw->nvm.flash_base_addr);
4090
4091 do {
4092 usec_delay(1);
4093 /* Steps */
4094 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4095 if (ret_val != E1000_SUCCESS)
4096 break;
4097 /* In SPT, This register is in Lan memory space, not flash.
4098 * Therefore, only 32 bit access is supported
4099 */
4100 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4101
4102 /* 11b corresponds to a 4 byte (dword) transfer size. */
4103 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4104 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
4105 /* In SPT, This register is in Lan memory space, not flash.
4106 * Therefore, only 32 bit access is supported
4107 */
4108 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4109 (u32)hsflctl.regval << 16);
4110 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4111
4112 ret_val = e1000_flash_cycle_ich8lan(hw,
4113 ICH_FLASH_READ_COMMAND_TIMEOUT);
4114
4115 /* Check if FCERR is set to 1, if set to 1, clear it
4116 * and try the whole sequence a few more times, else
4117 * read in (shift in) the Flash Data0, the order is
4118 * least significant byte first msb to lsb
4119 */
4120 if (ret_val == E1000_SUCCESS) {
4121 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
4122 break;
4123 } else {
4124 /* If we've gotten here, then things are probably
4125 * completely hosed, but if the error condition is
4126 * detected, it won't hurt to give it another try...
4127 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
4128 */
4129 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4130 ICH_FLASH_HSFSTS);
4131 if (hsfsts.hsf_status.flcerr) {
4132 /* Repeat for some time before giving up. */
4133 continue;
4134 } else if (!hsfsts.hsf_status.flcdone) {
4135 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4136 break;
4137 }
4138 }
4139 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4140
4141 return ret_val;
4142 }
4143
4144 /**
4145 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
4146 * @hw: pointer to the HW structure
4147 * @offset: The offset (in bytes) of the word(s) to write.
4148 * @words: Size of data to write in words
4149 * @data: Pointer to the word(s) to write at offset.
4150 *
4151 * Writes the word(s) to the shadow RAM; the data is committed to the
4152 * NVM when the checksum is next updated.
4153 **/
4154 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4155 u16 *data)
4156 {
4157 struct e1000_nvm_info *nvm = &hw->nvm;
4158 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4159 u16 i;
4160
4161 DEBUGFUNC("e1000_write_nvm_ich8lan");
4162
4163 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4164 (words == 0)) {
4165 DEBUGOUT("nvm parameter(s) out of bounds\n");
4166 return -E1000_ERR_NVM;
4167 }
4168
4169 nvm->ops.acquire(hw);
4170
4171 for (i = 0; i < words; i++) {
4172 dev_spec->shadow_ram[offset+i].modified = TRUE;
4173 dev_spec->shadow_ram[offset+i].value = data[i];
4174 }
4175
4176 nvm->ops.release(hw);
4177
4178 return E1000_SUCCESS;
4179 }
4180
4181 /**
4182 * e1000_update_nvm_checksum_spt - Update the checksum for NVM
4183 * @hw: pointer to the HW structure
4184 *
4185 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4186 * which writes the checksum to the shadow ram. The changes in the shadow
4187 * ram are then committed to the EEPROM by processing each bank at a time
4188 * checking for the modified bit and writing only the pending changes.
4189 * After a successful commit, the shadow ram is cleared and is ready for
4190 * future writes.
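 *
 * Worked example (illustrative numbers only): with a flash_bank_size of
 * 0x800 words and bank 0 currently valid, the commit erases bank 1,
 * writes each shadow-RAM dword pair to word offset 0x800 + i, rewrites
 * the new bank's signature word 0x13 so bits 15:14 go from 11b to 10b
 * (valid), and finally clears the high byte of the old bank's signature
 * word to invalidate it.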
**/
4191 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4192 {
4193 struct e1000_nvm_info *nvm = &hw->nvm;
4194 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4195 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4196 s32 ret_val;
4197 u32 dword = 0;
4198
4199 DEBUGFUNC("e1000_update_nvm_checksum_spt");
4200
4201 ret_val = e1000_update_nvm_checksum_generic(hw);
4202 if (ret_val)
4203 goto out;
4204
4205 if (nvm->type != e1000_nvm_flash_sw)
4206 goto out;
4207
4208 nvm->ops.acquire(hw);
4209
4210 /* We're writing to the opposite bank so if we're on bank 1,
4211 * write to bank 0 etc. We also need to erase the segment that
4212 * is going to be written
4213 */
4214 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4215 if (ret_val != E1000_SUCCESS) {
4216 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4217 bank = 0;
4218 }
4219
4220 if (bank == 0) {
4221 new_bank_offset = nvm->flash_bank_size;
4222 old_bank_offset = 0;
4223 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4224 if (ret_val)
4225 goto release;
4226 } else {
4227 old_bank_offset = nvm->flash_bank_size;
4228 new_bank_offset = 0;
4229 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4230 if (ret_val)
4231 goto release;
4232 }
4233 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4234 /* Determine whether to write the value stored
4235 * in the other NVM bank or a modified value stored
4236 * in the shadow RAM
4237 */
4238 ret_val = e1000_read_flash_dword_ich8lan(hw,
4239 i + old_bank_offset,
4240 &dword);
4241
4242 if (dev_spec->shadow_ram[i].modified) {
4243 dword &= 0xffff0000;
4244 dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4245 }
4246 if (dev_spec->shadow_ram[i + 1].modified) {
4247 dword &= 0x0000ffff;
4248 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4249 << 16);
4250 }
4251 if (ret_val)
4252 break;
4253
4254 /* If the word is 0x13, then make sure the signature bits
4255 * (15:14) are 11b until the commit has completed.
4256 * This will allow us to write 10b which indicates the
4257 * signature is valid. We want to do this after the write
4258 * has completed so that we don't mark the segment valid
4259 * while the write is still in progress
4260 */
4261 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4262 dword |= E1000_ICH_NVM_SIG_MASK << 16;
4263
4267 usec_delay(100);
4268
4269 /* Write the data to the new bank. Offset in words. */
4270 act_offset = i + new_bank_offset;
4271 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4272 dword);
4273 if (ret_val)
4274 break;
4275 }
4276
4277 /* Don't bother writing the segment valid bits if sector
4278 * programming failed.
4279 */ 4280 if (ret_val) { 4281 DEBUGOUT("Flash commit failed.\n"); 4282 goto release; 4283 } 4284 4285 /* Finally validate the new segment by setting bit 15:14 4286 * to 10b in word 0x13 , this can be done without an 4287 * erase as well since these bits are 11 to start with 4288 * and we need to change bit 14 to 0b 4289 */ 4290 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 4291 4292 /*offset in words but we read dword*/ 4293 --act_offset; 4294 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); 4295 4296 if (ret_val) 4297 goto release; 4298 4299 dword &= 0xBFFFFFFF; 4300 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); 4301 4302 if (ret_val) 4303 goto release; 4304 4305 /* offset in words but we read dword*/ 4306 act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; 4307 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); 4308 4309 if (ret_val) 4310 goto release; 4311 4312 dword &= 0x00FFFFFF; 4313 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); 4314 4315 if (ret_val) 4316 goto release; 4317 4318 /* Great! Everything worked, we can now clear the cached entries. */ 4319 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 4320 dev_spec->shadow_ram[i].modified = FALSE; 4321 dev_spec->shadow_ram[i].value = 0xFFFF; 4322 } 4323 4324 release: 4325 nvm->ops.release(hw); 4326 4327 /* Reload the EEPROM, or else modifications will not appear 4328 * until after the next adapter reset. 4329 */ 4330 if (!ret_val) { 4331 nvm->ops.reload(hw); 4332 msec_delay(10); 4333 } 4334 4335 out: 4336 if (ret_val) 4337 DEBUGOUT1("NVM update error: %d\n", ret_val); 4338 4339 return ret_val; 4340 } 4341 4342 /** 4343 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM 4344 * @hw: pointer to the HW structure 4345 * 4346 * The NVM checksum is updated by calling the generic update_nvm_checksum, 4347 * which writes the checksum to the shadow ram. The changes in the shadow 4348 * ram are then committed to the EEPROM by processing each bank at a time 4349 * checking for the modified bit and writing only the pending changes. 4350 * After a successful commit, the shadow ram is cleared and is ready for 4351 * future writes. 4352 **/ 4353 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) 4354 { 4355 struct e1000_nvm_info *nvm = &hw->nvm; 4356 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 4357 u32 i, act_offset, new_bank_offset, old_bank_offset, bank; 4358 s32 ret_val; 4359 u16 data = 0; 4360 4361 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); 4362 4363 ret_val = e1000_update_nvm_checksum_generic(hw); 4364 if (ret_val) 4365 goto out; 4366 4367 if (nvm->type != e1000_nvm_flash_sw) 4368 goto out; 4369 4370 nvm->ops.acquire(hw); 4371 4372 /* We're writing to the opposite bank so if we're on bank 1, 4373 * write to bank 0 etc. 
We also need to erase the segment that 4374 * is going to be written 4375 */ 4376 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 4377 if (ret_val != E1000_SUCCESS) { 4378 DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); 4379 bank = 0; 4380 } 4381 4382 if (bank == 0) { 4383 new_bank_offset = nvm->flash_bank_size; 4384 old_bank_offset = 0; 4385 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 4386 if (ret_val) 4387 goto release; 4388 } else { 4389 old_bank_offset = nvm->flash_bank_size; 4390 new_bank_offset = 0; 4391 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 4392 if (ret_val) 4393 goto release; 4394 } 4395 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 4396 if (dev_spec->shadow_ram[i].modified) { 4397 data = dev_spec->shadow_ram[i].value; 4398 } else { 4399 ret_val = e1000_read_flash_word_ich8lan(hw, i + 4400 old_bank_offset, 4401 &data); 4402 if (ret_val) 4403 break; 4404 } 4405 /* If the word is 0x13, then make sure the signature bits 4406 * (15:14) are 11b until the commit has completed. 4407 * This will allow us to write 10b which indicates the 4408 * signature is valid. We want to do this after the write 4409 * has completed so that we don't mark the segment valid 4410 * while the write is still in progress 4411 */ 4412 if (i == E1000_ICH_NVM_SIG_WORD) 4413 data |= E1000_ICH_NVM_SIG_MASK; 4414 4415 /* Convert offset to bytes. */ 4416 act_offset = (i + new_bank_offset) << 1; 4417 4418 usec_delay(100); 4419 4420 /* Write the bytes to the new bank. */ 4421 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 4422 act_offset, 4423 (u8)data); 4424 if (ret_val) 4425 break; 4426 4427 usec_delay(100); 4428 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 4429 act_offset + 1, 4430 (u8)(data >> 8)); 4431 if (ret_val) 4432 break; 4433 } 4434 4435 /* Don't bother writing the segment valid bits if sector 4436 * programming failed. 4437 */ 4438 if (ret_val) { 4439 DEBUGOUT("Flash commit failed.\n"); 4440 goto release; 4441 } 4442 4443 /* Finally validate the new segment by setting bit 15:14 4444 * to 10b in word 0x13 , this can be done without an 4445 * erase as well since these bits are 11 to start with 4446 * and we need to change bit 14 to 0b 4447 */ 4448 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 4449 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); 4450 if (ret_val) 4451 goto release; 4452 4453 data &= 0xBFFF; 4454 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, 4455 (u8)(data >> 8)); 4456 if (ret_val) 4457 goto release; 4458 4459 /* And invalidate the previously valid segment by setting 4460 * its signature word (0x13) high_byte to 0b. This can be 4461 * done without an erase because flash erase sets all bits 4462 * to 1's. We can write 1's to 0's without an erase 4463 */ 4464 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 4465 4466 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 4467 4468 if (ret_val) 4469 goto release; 4470 4471 /* Great! Everything worked, we can now clear the cached entries. */ 4472 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 4473 dev_spec->shadow_ram[i].modified = FALSE; 4474 dev_spec->shadow_ram[i].value = 0xFFFF; 4475 } 4476 4477 release: 4478 nvm->ops.release(hw); 4479 4480 /* Reload the EEPROM, or else modifications will not appear 4481 * until after the next adapter reset. 
4482 */
4483 if (!ret_val) {
4484 nvm->ops.reload(hw);
4485 msec_delay(10);
4486 }
4487
4488 out:
4489 if (ret_val)
4490 DEBUGOUT1("NVM update error: %d\n", ret_val);
4491
4492 return ret_val;
4493 }
4494
4495 /**
4496 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4497 * @hw: pointer to the HW structure
4498 *
4499 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4500 * If the bit is 0, the EEPROM has been modified but the checksum was not
4501 * calculated, in which case we need to calculate the checksum and set bit 6.
4502 **/
4503 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4504 {
4505 s32 ret_val;
4506 u16 data;
4507 u16 word;
4508 u16 valid_csum_mask;
4509
4510 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4511
4512 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4513 * the checksum needs to be fixed. This bit is an indication that
4514 * the NVM was prepared by OEM software and did not calculate
4515 * the checksum...a likely scenario.
4516 */
4517 switch (hw->mac.type) {
4518 case e1000_pch_lpt:
4519 case e1000_pch_spt:
4520 case e1000_pch_cnp:
4521 case e1000_pch_tgp:
4522 case e1000_pch_adp:
4523 case e1000_pch_mtp:
4524 word = NVM_COMPAT;
4525 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4526 break;
4527 default:
4528 word = NVM_FUTURE_INIT_WORD1;
4529 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4530 break;
4531 }
4532
4533 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4534 if (ret_val)
4535 return ret_val;
4536
4537 if (!(data & valid_csum_mask)) {
4538 data |= valid_csum_mask;
4539 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4540 if (ret_val)
4541 return ret_val;
4542 ret_val = hw->nvm.ops.update(hw);
4543 if (ret_val)
4544 return ret_val;
4545 }
4546
4547 return e1000_validate_nvm_checksum_generic(hw);
4548 }
4549
4550 /**
4551 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4552 * @hw: pointer to the HW structure
4553 * @offset: The offset (in bytes) of the byte/word to write.
4554 * @size: Size of data to write, 1=byte 2=word
4555 * @data: The byte(s) to write to the NVM.
4556 *
4557 * Writes one/two bytes (or, on SPT, a dword) to the NVM using the flash
4558 * access registers.
4559 **/
4560 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4560 u8 size, u16 data)
4561 {
4562 union ich8_hws_flash_status hsfsts;
4563 union ich8_hws_flash_ctrl hsflctl;
4564 u32 flash_linear_addr;
4565 u32 flash_data = 0;
4566 s32 ret_val;
4567 u8 count = 0;
4568
4569 DEBUGFUNC("e1000_write_flash_data_ich8lan");
4570
4571 if (hw->mac.type >= e1000_pch_spt) {
4572 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4573 return -E1000_ERR_NVM;
4574 } else {
4575 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4576 return -E1000_ERR_NVM;
4577 }
4578
4579 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4580 hw->nvm.flash_base_addr);
4581
4582 do {
4583 usec_delay(1);
4584 /* Steps */
4585 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4586 if (ret_val != E1000_SUCCESS)
4587 break;
4588 /* In SPT, This register is in Lan memory space, not
4589 * flash. Therefore, only 32 bit access is supported
4590 */
4591 if (hw->mac.type >= e1000_pch_spt)
4592 hsflctl.regval =
4593 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4594 else
4595 hsflctl.regval =
4596 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4597
4598 /* 0b/1b corresponds to 1 or 2 byte size; on SPT, 11b selects a dword.
*/
4599 hsflctl.hsf_ctrl.fldbcount = size - 1;
4600 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4601 /* In SPT, This register is in Lan memory space,
4602 * not flash. Therefore, only 32 bit access is
4603 * supported
4604 */
4605 if (hw->mac.type >= e1000_pch_spt)
4606 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4607 hsflctl.regval << 16);
4608 else
4609 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4610 hsflctl.regval);
4611
4612 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4613
4614 if (size == 1)
4615 flash_data = (u32)data & 0x00FF;
4616 else
4617 flash_data = (u32)data;
4618
4619 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4620
4621 /* check if FCERR is set to 1, if set to 1, clear it
4622 * and try the whole sequence a few more times else done
4623 */
4624 ret_val =
4625 e1000_flash_cycle_ich8lan(hw,
4626 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4627 if (ret_val == E1000_SUCCESS)
4628 break;
4629
4630 /* If we're here, then things are most likely
4631 * completely hosed, but if the error condition
4632 * is detected, it won't hurt to give it another
4633 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4634 */
4635 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4636 if (hsfsts.hsf_status.flcerr)
4637 /* Repeat for some time before giving up. */
4638 continue;
4639 if (!hsfsts.hsf_status.flcdone) {
4640 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4641 break;
4642 }
4643 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4644
4645 return ret_val;
4646 }
4647
4648 /**
4649 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4650 * @hw: pointer to the HW structure
4651 * @offset: The offset (in bytes) of the dword to write.
4652 * @data: The 4 bytes to write to the NVM.
4653 *
4654 * Writes a dword to the NVM using the flash access registers.
4655 **/
4656 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4657 u32 data)
4658 {
4659 union ich8_hws_flash_status hsfsts;
4660 union ich8_hws_flash_ctrl hsflctl;
4661 u32 flash_linear_addr;
4662 s32 ret_val;
4663 u8 count = 0;
4664
4665 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4666
4667 if (hw->mac.type >= e1000_pch_spt) {
4668 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4669 return -E1000_ERR_NVM;
4670 }
4671 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4672 hw->nvm.flash_base_addr);
4673 do {
4674 usec_delay(1);
4675 /* Steps */
4676 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4677 if (ret_val != E1000_SUCCESS)
4678 break;
4679
4680 /* In SPT, This register is in Lan memory space, not
4681 * flash. Therefore, only 32 bit access is supported
4682 */
4683 if (hw->mac.type >= e1000_pch_spt)
4684 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4685 ICH_FLASH_HSFSTS)
4686 >> 16;
4687 else
4688 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4689 ICH_FLASH_HSFCTL);
4690
4691 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4692 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4693
4694 /* In SPT, This register is in Lan memory space,
4695 * not flash.
Therefore, only 32 bit access is
4696 * supported
4697 */
4698 if (hw->mac.type >= e1000_pch_spt)
4699 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4700 hsflctl.regval << 16);
4701 else
4702 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4703 hsflctl.regval);
4704
4705 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4706
4707 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4708
4709 /* check if FCERR is set to 1, if set to 1, clear it
4710 * and try the whole sequence a few more times else done
4711 */
4712 ret_val = e1000_flash_cycle_ich8lan(hw,
4713 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4714
4715 if (ret_val == E1000_SUCCESS)
4716 break;
4717
4718 /* If we're here, then things are most likely
4719 * completely hosed, but if the error condition
4720 * is detected, it won't hurt to give it another
4721 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4722 */
4723 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4724
4725 if (hsfsts.hsf_status.flcerr)
4726 /* Repeat for some time before giving up. */
4727 continue;
4728 if (!hsfsts.hsf_status.flcdone) {
4729 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4730 break;
4731 }
4732 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4733
4734 return ret_val;
4735 }
4736
4737 /**
4738 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4739 * @hw: pointer to the HW structure
4740 * @offset: The index of the byte to write.
4741 * @data: The byte to write to the NVM.
4742 *
4743 * Writes a single byte to the NVM using the flash access registers.
4744 **/
4745 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4746 u8 data)
4747 {
4748 u16 word = (u16)data;
4749
4750 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4751
4752 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4753 }
4754
4755 /**
4756 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4757 * @hw: pointer to the HW structure
4758 * @offset: The word offset of the dword to write.
4759 * @dword: The dword to write to the NVM.
4760 *
4761 * Writes a single dword to the NVM using the flash access registers.
4762 * Goes through a retry algorithm before giving up.
4763 **/
4764 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4765 u32 offset, u32 dword)
4766 {
4767 s32 ret_val;
4768 u16 program_retries;
4769
4770 DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4771
4772 /* Must convert word offset into bytes. */
4773 offset <<= 1;
4774
4775 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4776
4777 if (!ret_val)
4778 return ret_val;
4779 for (program_retries = 0; program_retries < 100; program_retries++) {
4780 DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4781 usec_delay(100);
4782 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4783 if (ret_val == E1000_SUCCESS)
4784 break;
4785 }
4786 if (program_retries == 100)
4787 return -E1000_ERR_NVM;
4788
4789 return E1000_SUCCESS;
4790 }
4791
4792 /**
4793 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4794 * @hw: pointer to the HW structure
4795 * @offset: The offset of the byte to write.
4796 * @byte: The byte to write to the NVM.
4797 *
4798 * Writes a single byte to the NVM using the flash access registers.
4799 * Goes through a retry algorithm before giving up.
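 *
 * As with the dword variant above, a failed program is retried up to 100
 * times with a 100-usec pause between attempts; -E1000_ERR_NVM is
 * returned only if every retry fails.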
4800 **/ 4801 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 4802 u32 offset, u8 byte) 4803 { 4804 s32 ret_val; 4805 u16 program_retries; 4806 4807 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); 4808 4809 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 4810 if (!ret_val) 4811 return ret_val; 4812 4813 for (program_retries = 0; program_retries < 100; program_retries++) { 4814 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); 4815 usec_delay(100); 4816 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 4817 if (ret_val == E1000_SUCCESS) 4818 break; 4819 } 4820 if (program_retries == 100) 4821 return -E1000_ERR_NVM; 4822 4823 return E1000_SUCCESS; 4824 } 4825 4826 /** 4827 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM 4828 * @hw: pointer to the HW structure 4829 * @bank: 0 for first bank, 1 for second bank, etc. 4830 * 4831 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. 4832 * bank N is 4096 * N + flash_reg_addr. 4833 **/ 4834 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) 4835 { 4836 struct e1000_nvm_info *nvm = &hw->nvm; 4837 union ich8_hws_flash_status hsfsts; 4838 union ich8_hws_flash_ctrl hsflctl; 4839 u32 flash_linear_addr; 4840 /* bank size is in 16bit words - adjust to bytes */ 4841 u32 flash_bank_size = nvm->flash_bank_size * 2; 4842 s32 ret_val; 4843 s32 count = 0; 4844 s32 j, iteration, sector_size; 4845 4846 DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); 4847 4848 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 4849 4850 /* Determine HW Sector size: Read BERASE bits of hw flash status 4851 * register 4852 * 00: The Hw sector is 256 bytes, hence we need to erase 16 4853 * consecutive sectors. The start index for the nth Hw sector 4854 * can be calculated as = bank * 4096 + n * 256 4855 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 4856 * The start index for the nth Hw sector can be calculated 4857 * as = bank * 4096 4858 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 4859 * (ich9 only, otherwise error condition) 4860 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 4861 */ 4862 switch (hsfsts.hsf_status.berasesz) { 4863 case 0: 4864 /* Hw sector size 256 */ 4865 sector_size = ICH_FLASH_SEG_SIZE_256; 4866 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; 4867 break; 4868 case 1: 4869 sector_size = ICH_FLASH_SEG_SIZE_4K; 4870 iteration = 1; 4871 break; 4872 case 2: 4873 sector_size = ICH_FLASH_SEG_SIZE_8K; 4874 iteration = 1; 4875 break; 4876 case 3: 4877 sector_size = ICH_FLASH_SEG_SIZE_64K; 4878 iteration = 1; 4879 break; 4880 default: 4881 return -E1000_ERR_NVM; 4882 } 4883 4884 /* Start with the base address, then add the sector offset. */ 4885 flash_linear_addr = hw->nvm.flash_base_addr; 4886 flash_linear_addr += (bank) ? 
flash_bank_size : 0;
4887
4888 for (j = 0; j < iteration; j++) {
4889 do {
4890 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4891
4892 /* Steps */
4893 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4894 if (ret_val)
4895 return ret_val;
4896
4897 /* Write a value 11 (block Erase) in Flash
4898 * Cycle field in hw flash control
4899 */
4900 if (hw->mac.type >= e1000_pch_spt)
4901 hsflctl.regval =
4902 E1000_READ_FLASH_REG(hw,
4903 ICH_FLASH_HSFSTS)>>16;
4904 else
4905 hsflctl.regval =
4906 E1000_READ_FLASH_REG16(hw,
4907 ICH_FLASH_HSFCTL);
4908
4909 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4910 if (hw->mac.type >= e1000_pch_spt)
4911 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4912 hsflctl.regval << 16);
4913 else
4914 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4915 hsflctl.regval);
4916
4917 /* Write the last 24 bits of the sector index within
4918 * the block into the Flash Linear address field in
4919 * Flash Address, computed from the bank base so that
4920 * retries and later sectors do not accumulate the
4921 * offset.
4922 */
4923 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4924 flash_linear_addr + (j * sector_size));
4925 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4926 if (ret_val == E1000_SUCCESS)
4927 break;
4928
4929 /* Check if FCERR is set to 1. If 1,
4930 * clear it and try the whole sequence
4931 * a few more times else Done
4932 */
4933 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4934 ICH_FLASH_HSFSTS);
4935 if (hsfsts.hsf_status.flcerr)
4936 /* repeat for some time before giving up */
4937 continue;
4938 else if (!hsfsts.hsf_status.flcdone)
4939 return ret_val;
4940 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4941 }
4942
4943 return E1000_SUCCESS;
4944 }
4945
4946 /**
4947 * e1000_valid_led_default_ich8lan - Set the default LED settings
4948 * @hw: pointer to the HW structure
4949 * @data: Pointer to the LED settings
4950 *
4951 * Reads the LED default settings from the NVM to data. If the NVM LED
4952 * setting is all 0's or F's, set the LED default to a valid LED default
4953 * setting.
4954 **/
4955 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4956 {
4957 s32 ret_val;
4958
4959 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4960
4961 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4962 if (ret_val) {
4963 DEBUGOUT("NVM Read Error\n");
4964 return ret_val;
4965 }
4966
4967 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4968 *data = ID_LED_DEFAULT_ICH8LAN;
4969
4970 return E1000_SUCCESS;
4971 }
4972
4973 /**
4974 * e1000_id_led_init_pchlan - store LED configurations
4975 * @hw: pointer to the HW structure
4976 *
4977 * PCH does not control LEDs via the LEDCTL register, rather it uses
4978 * the PHY LED configuration register.
4979 *
4980 * PCH also does not have an "always on" or "always off" mode which
4981 * complicates the ID feature. Instead of using the "on" mode to indicate
4982 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4983 * use "link_up" mode. The LEDs will still ID on request if there is no
4984 * link based on logic in e1000_led_[on|off]_pchlan().
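 *
 * Illustrative decode (values assumed for the example): each 4-bit field
 * of the NVM ID LED word selects a mode pair for one LED; a nibble equal
 * to ID_LED_ON1_OFF2, for instance, programs that LED's 5-bit field in
 * ledctl_mode1 to "on" (link_up) and in ledctl_mode2 to "off"
 * (link_up | invert).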
4985 **/ 4986 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) 4987 { 4988 struct e1000_mac_info *mac = &hw->mac; 4989 s32 ret_val; 4990 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; 4991 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; 4992 u16 data, i, temp, shift; 4993 4994 DEBUGFUNC("e1000_id_led_init_pchlan"); 4995 4996 /* Get default ID LED modes */ 4997 ret_val = hw->nvm.ops.valid_led_default(hw, &data); 4998 if (ret_val) 4999 return ret_val; 5000 5001 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); 5002 mac->ledctl_mode1 = mac->ledctl_default; 5003 mac->ledctl_mode2 = mac->ledctl_default; 5004 5005 for (i = 0; i < 4; i++) { 5006 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; 5007 shift = (i * 5); 5008 switch (temp) { 5009 case ID_LED_ON1_DEF2: 5010 case ID_LED_ON1_ON2: 5011 case ID_LED_ON1_OFF2: 5012 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 5013 mac->ledctl_mode1 |= (ledctl_on << shift); 5014 break; 5015 case ID_LED_OFF1_DEF2: 5016 case ID_LED_OFF1_ON2: 5017 case ID_LED_OFF1_OFF2: 5018 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 5019 mac->ledctl_mode1 |= (ledctl_off << shift); 5020 break; 5021 default: 5022 /* Do nothing */ 5023 break; 5024 } 5025 switch (temp) { 5026 case ID_LED_DEF1_ON2: 5027 case ID_LED_ON1_ON2: 5028 case ID_LED_OFF1_ON2: 5029 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 5030 mac->ledctl_mode2 |= (ledctl_on << shift); 5031 break; 5032 case ID_LED_DEF1_OFF2: 5033 case ID_LED_ON1_OFF2: 5034 case ID_LED_OFF1_OFF2: 5035 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 5036 mac->ledctl_mode2 |= (ledctl_off << shift); 5037 break; 5038 default: 5039 /* Do nothing */ 5040 break; 5041 } 5042 } 5043 5044 return E1000_SUCCESS; 5045 } 5046 5047 /** 5048 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width 5049 * @hw: pointer to the HW structure 5050 * 5051 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability 5052 * register, so the bus width is hard coded. 5053 **/ 5054 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) 5055 { 5056 struct e1000_bus_info *bus = &hw->bus; 5057 s32 ret_val; 5058 5059 DEBUGFUNC("e1000_get_bus_info_ich8lan"); 5060 5061 ret_val = e1000_get_bus_info_pcie_generic(hw); 5062 5063 /* ICH devices are "PCI Express"-ish. They have 5064 * a configuration space, but do not contain 5065 * PCI Express Capability registers, so bus width 5066 * must be hardcoded. 5067 */ 5068 if (bus->width == e1000_bus_width_unknown) 5069 bus->width = e1000_bus_width_pcie_x1; 5070 5071 return ret_val; 5072 } 5073 5074 /** 5075 * e1000_reset_hw_ich8lan - Reset the hardware 5076 * @hw: pointer to the HW structure 5077 * 5078 * Does a full reset of the hardware which includes a reset of the PHY and 5079 * MAC. 5080 **/ 5081 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 5082 { 5083 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 5084 u16 kum_cfg; 5085 u32 ctrl, reg; 5086 s32 ret_val; 5087 u16 pci_cfg; 5088 5089 DEBUGFUNC("e1000_reset_hw_ich8lan"); 5090 5091 /* Prevent the PCI-E bus from sticking if there is no TLP connection 5092 * on the last TLP read/write transaction when MAC is reset. 5093 */ 5094 ret_val = e1000_disable_pcie_master_generic(hw); 5095 if (ret_val) 5096 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 5097 5098 DEBUGOUT("Masking off all interrupts\n"); 5099 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 5100 5101 /* Disable the Transmit and Receive units. 
Then delay to allow
5102 * any pending transactions to complete before we hit the MAC
5103 * with the global reset.
5104 */
5105 E1000_WRITE_REG(hw, E1000_RCTL, 0);
5106 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
5107 E1000_WRITE_FLUSH(hw);
5108
5109 msec_delay(10);
5110
5111 /* Workaround for ICH8 bit corruption issue in FIFO memory */
5112 if (hw->mac.type == e1000_ich8lan) {
5113 /* Set Tx and Rx buffer allocation to 8k apiece. */
5114 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
5115 /* Set Packet Buffer Size to 16k. */
5116 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
5117 }
5118
5119 if (hw->mac.type == e1000_pchlan) {
5120 /* Save the NVM K1 bit setting */
5121 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
5122 if (ret_val)
5123 return ret_val;
5124
5125 if (kum_cfg & E1000_NVM_K1_ENABLE)
5126 dev_spec->nvm_k1_enabled = TRUE;
5127 else
5128 dev_spec->nvm_k1_enabled = FALSE;
5129 }
5130
5131 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5132
5133 if (!hw->phy.ops.check_reset_block(hw)) {
5134 /* Full-chip reset requires MAC and PHY reset at the same
5135 * time to make sure the interface between MAC and the
5136 * external PHY is reset.
5137 */
5138 ctrl |= E1000_CTRL_PHY_RST;
5139
5140 /* Gate automatic PHY configuration by hardware on
5141 * non-managed 82579
5142 */
5143 if ((hw->mac.type == e1000_pch2lan) &&
5144 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5145 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
5146 }
5147 ret_val = e1000_acquire_swflag_ich8lan(hw);
5148
5149 /* A read from EXTCNF_CTRL in the e1000_acquire_swflag_ich8lan function
5150 * may occur during global reset and cause a system hang.
5151 * Configuration space access creates the needed delay.
5152 * Writing the E1000_PCI_VENDOR_ID_REGISTER value to the RO E1000_STRAP
5153 * register ensures the configuration space read is done before the
5154 * global reset.
*/
5155 e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5156 E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5157 DEBUGOUT("Issuing a global reset to ich8lan\n");
5158 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5159 /* cannot issue a flush here because it hangs the hardware */
5160 msec_delay(20);
5161
5162 /* Configuration space access improves the HW-level time sync
5163 * mechanism. Write the E1000_PCI_VENDOR_ID_REGISTER value to the RO
5164 * E1000_STRAP register to ensure the configuration space read is done
5165 * before any access to MAC registers.
5166 */
5167 e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5168 E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5169
5170 /* Set Phy Config Counter to 50msec */
5171 if (hw->mac.type == e1000_pch2lan) {
5172 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5173 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5174 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5175 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5176 }
5177
5178
5179 if (ctrl & E1000_CTRL_PHY_RST) {
5180 ret_val = hw->phy.ops.get_cfg_done(hw);
5181 if (ret_val)
5182 return ret_val;
5183
5184 ret_val = e1000_post_phy_reset_ich8lan(hw);
5185 if (ret_val)
5186 return ret_val;
5187 }
5188
5189 /* For PCH, this write will make sure that any noise
5190 * will be detected as a CRC error and be dropped rather than show up
5191 * as a bad packet to the DMA engine.
5192 */ 5193 if (hw->mac.type == e1000_pchlan) 5194 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565); 5195 5196 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 5197 E1000_READ_REG(hw, E1000_ICR); 5198 5199 reg = E1000_READ_REG(hw, E1000_KABGTXD); 5200 reg |= E1000_KABGTXD_BGSQLBIAS; 5201 E1000_WRITE_REG(hw, E1000_KABGTXD, reg); 5202 5203 return E1000_SUCCESS; 5204 } 5205 5206 /** 5207 * e1000_init_hw_ich8lan - Initialize the hardware 5208 * @hw: pointer to the HW structure 5209 * 5210 * Prepares the hardware for transmit and receive by doing the following: 5211 * - initialize hardware bits 5212 * - initialize LED identification 5213 * - setup receive address registers 5214 * - setup flow control 5215 * - setup transmit descriptors 5216 * - clear statistics 5217 **/ 5218 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) 5219 { 5220 struct e1000_mac_info *mac = &hw->mac; 5221 u32 ctrl_ext, txdctl, snoop; 5222 s32 ret_val; 5223 u16 i; 5224 5225 DEBUGFUNC("e1000_init_hw_ich8lan"); 5226 5227 e1000_initialize_hw_bits_ich8lan(hw); 5228 5229 /* Initialize identification LED */ 5230 ret_val = mac->ops.id_led_init(hw); 5231 /* An error is not fatal and we should not stop init due to this */ 5232 if (ret_val) 5233 DEBUGOUT("Error initializing identification LED\n"); 5234 5235 /* Setup the receive address. */ 5236 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); 5237 5238 /* Zero out the Multicast HASH table */ 5239 DEBUGOUT("Zeroing the MTA\n"); 5240 for (i = 0; i < mac->mta_reg_count; i++) 5241 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 5242 5243 /* The 82578 Rx buffer will stall if wakeup is enabled in host and 5244 * the ME. Disable wakeup by clearing the host wakeup bit. 5245 * Reset the phy after disabling host wakeup to reset the Rx buffer. 5246 */ 5247 if (hw->phy.type == e1000_phy_82578) { 5248 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i); 5249 i &= ~BM_WUC_HOST_WU_BIT; 5250 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i); 5251 ret_val = e1000_phy_hw_reset_ich8lan(hw); 5252 if (ret_val) 5253 return ret_val; 5254 } 5255 5256 /* Setup link and flow control */ 5257 ret_val = mac->ops.setup_link(hw); 5258 5259 /* Set the transmit descriptor write-back policy for both queues */ 5260 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); 5261 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | 5262 E1000_TXDCTL_FULL_TX_DESC_WB); 5263 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | 5264 E1000_TXDCTL_MAX_TX_DESC_PREFETCH); 5265 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); 5266 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); 5267 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | 5268 E1000_TXDCTL_FULL_TX_DESC_WB); 5269 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | 5270 E1000_TXDCTL_MAX_TX_DESC_PREFETCH); 5271 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); 5272 5273 /* ICH8 has opposite polarity of no_snoop bits. 5274 * By default, we should use snoop behavior. 5275 */ 5276 if (mac->type == e1000_ich8lan) 5277 snoop = PCIE_ICH8_SNOOP_ALL; 5278 else 5279 snoop = (u32) ~(PCIE_NO_SNOOP_ALL); 5280 e1000_set_pcie_no_snoop_generic(hw, snoop); 5281 5282 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5283 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 5284 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 5285 5286 /* Clear all of the statistics registers (clear on read). It is 5287 * important that we do this after we have tried to establish link 5288 * because the symbol error count will increment wildly if there 5289 * is no link. 
5290 */ 5291 e1000_clear_hw_cntrs_ich8lan(hw); 5292 5293 return ret_val; 5294 } 5295 5296 /** 5297 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits 5298 * @hw: pointer to the HW structure 5299 * 5300 * Sets/Clears required hardware bits necessary for correctly setting up the 5301 * hardware for transmit and receive. 5302 **/ 5303 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) 5304 { 5305 u32 reg; 5306 5307 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); 5308 5309 /* Extended Device Control */ 5310 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 5311 reg |= (1 << 22); 5312 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 5313 if (hw->mac.type >= e1000_pchlan) 5314 reg |= E1000_CTRL_EXT_PHYPDEN; 5315 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 5316 5317 /* Transmit Descriptor Control 0 */ 5318 reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); 5319 reg |= (1 << 22); 5320 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); 5321 5322 /* Transmit Descriptor Control 1 */ 5323 reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); 5324 reg |= (1 << 22); 5325 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); 5326 5327 /* Transmit Arbitration Control 0 */ 5328 reg = E1000_READ_REG(hw, E1000_TARC(0)); 5329 if (hw->mac.type == e1000_ich8lan) 5330 reg |= (1 << 28) | (1 << 29); 5331 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 5332 E1000_WRITE_REG(hw, E1000_TARC(0), reg); 5333 5334 /* Transmit Arbitration Control 1 */ 5335 reg = E1000_READ_REG(hw, E1000_TARC(1)); 5336 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) 5337 reg &= ~(1 << 28); 5338 else 5339 reg |= (1 << 28); 5340 reg |= (1 << 24) | (1 << 26) | (1 << 30); 5341 E1000_WRITE_REG(hw, E1000_TARC(1), reg); 5342 5343 /* Device Status */ 5344 if (hw->mac.type == e1000_ich8lan) { 5345 reg = E1000_READ_REG(hw, E1000_STATUS); 5346 reg &= ~(1U << 31); 5347 E1000_WRITE_REG(hw, E1000_STATUS, reg); 5348 } 5349 5350 /* work-around descriptor data corruption issue during nfs v2 udp 5351 * traffic, just disable the nfs filtering capability 5352 */ 5353 reg = E1000_READ_REG(hw, E1000_RFCTL); 5354 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 5355 5356 /* Disable IPv6 extension header parsing because some malformed 5357 * IPv6 headers can hang the Rx. 5358 */ 5359 if (hw->mac.type == e1000_ich8lan) 5360 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); 5361 E1000_WRITE_REG(hw, E1000_RFCTL, reg); 5362 5363 /* Enable ECC on Lynxpoint */ 5364 if (hw->mac.type >= e1000_pch_lpt) { 5365 reg = E1000_READ_REG(hw, E1000_PBECCSTS); 5366 reg |= E1000_PBECCSTS_ECC_ENABLE; 5367 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg); 5368 5369 reg = E1000_READ_REG(hw, E1000_CTRL); 5370 reg |= E1000_CTRL_MEHE; 5371 E1000_WRITE_REG(hw, E1000_CTRL, reg); 5372 } 5373 5374 return; 5375 } 5376 5377 /** 5378 * e1000_setup_link_ich8lan - Setup flow control and link settings 5379 * @hw: pointer to the HW structure 5380 * 5381 * Determines which flow control settings to use, then configures flow 5382 * control. Calls the appropriate media-specific link configuration 5383 * function. Assuming the adapter has a valid link partner, a valid link 5384 * should be established. Assumes the hardware has previously been reset 5385 * and the transmitter and receiver are not enabled. 
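 *
 * For example (behavior taken from the code below, noted here for
 * clarity): a requested mode of e1000_fc_default is promoted to
 * e1000_fc_full, and on 82577/82578/82579/i217 PHYs the FCRTV_PCH
 * refresh timer and the PHY pause timer (page BM_PORT_CTRL_PAGE,
 * register 27) are loaded from hw->fc.refresh_time and
 * hw->fc.pause_time.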
5386 **/ 5387 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) 5388 { 5389 s32 ret_val; 5390 5391 DEBUGFUNC("e1000_setup_link_ich8lan"); 5392 5393 /* ICH parts do not have a word in the NVM to determine 5394 * the default flow control setting, so we explicitly 5395 * set it to full. 5396 */ 5397 if (hw->fc.requested_mode == e1000_fc_default) 5398 hw->fc.requested_mode = e1000_fc_full; 5399 5400 /* Save off the requested flow control mode for use later. Depending 5401 * on the link partner's capabilities, we may or may not use this mode. 5402 */ 5403 hw->fc.current_mode = hw->fc.requested_mode; 5404 5405 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", 5406 hw->fc.current_mode); 5407 5408 if (!hw->phy.ops.check_reset_block(hw)) { 5409 /* Continue to configure the copper link. */ 5410 ret_val = hw->mac.ops.setup_physical_interface(hw); 5411 if (ret_val) 5412 return ret_val; 5413 } 5414 5415 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); 5416 if ((hw->phy.type == e1000_phy_82578) || 5417 (hw->phy.type == e1000_phy_82579) || 5418 (hw->phy.type == e1000_phy_i217) || 5419 (hw->phy.type == e1000_phy_82577)) { 5420 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time); 5421 5422 ret_val = hw->phy.ops.write_reg(hw, 5423 PHY_REG(BM_PORT_CTRL_PAGE, 27), 5424 hw->fc.pause_time); 5425 if (ret_val) 5426 return ret_val; 5427 } 5428 5429 return e1000_set_fc_watermarks_generic(hw); 5430 } 5431 5432 /** 5433 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface 5434 * @hw: pointer to the HW structure 5435 * 5436 * Configures the kumeran interface to the PHY to wait the appropriate time 5437 * when polling the PHY, then call the generic setup_copper_link to finish 5438 * configuring the copper link. 5439 **/ 5440 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) 5441 { 5442 u32 ctrl; 5443 s32 ret_val; 5444 u16 reg_data; 5445 5446 DEBUGFUNC("e1000_setup_copper_link_ich8lan"); 5447 5448 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5449 ctrl |= E1000_CTRL_SLU; 5450 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 5451 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5452 5453 /* Set the mac to wait the maximum time between each iteration 5454 * and increase the max iterations when polling the phy; 5455 * this fixes erroneous timeouts at 10Mbps. 
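	 * (Concretely, the writes below load E1000_KMRNCTRLSTA_TIMEOUTS
	 * with 0xFFFF and OR 0x3F into E1000_KMRNCTRLSTA_INBAND_PARAM.)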
5456 */ 5457 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 5458 0xFFFF); 5459 if (ret_val) 5460 return ret_val; 5461 ret_val = e1000_read_kmrn_reg_generic(hw, 5462 E1000_KMRNCTRLSTA_INBAND_PARAM, 5463 ®_data); 5464 if (ret_val) 5465 return ret_val; 5466 reg_data |= 0x3F; 5467 ret_val = e1000_write_kmrn_reg_generic(hw, 5468 E1000_KMRNCTRLSTA_INBAND_PARAM, 5469 reg_data); 5470 if (ret_val) 5471 return ret_val; 5472 5473 switch (hw->phy.type) { 5474 case e1000_phy_igp_3: 5475 ret_val = e1000_copper_link_setup_igp(hw); 5476 if (ret_val) 5477 return ret_val; 5478 break; 5479 case e1000_phy_bm: 5480 case e1000_phy_82578: 5481 ret_val = e1000_copper_link_setup_m88(hw); 5482 if (ret_val) 5483 return ret_val; 5484 break; 5485 case e1000_phy_82577: 5486 case e1000_phy_82579: 5487 ret_val = e1000_copper_link_setup_82577(hw); 5488 if (ret_val) 5489 return ret_val; 5490 break; 5491 case e1000_phy_ife: 5492 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, 5493 ®_data); 5494 if (ret_val) 5495 return ret_val; 5496 5497 reg_data &= ~IFE_PMC_AUTO_MDIX; 5498 5499 switch (hw->phy.mdix) { 5500 case 1: 5501 reg_data &= ~IFE_PMC_FORCE_MDIX; 5502 break; 5503 case 2: 5504 reg_data |= IFE_PMC_FORCE_MDIX; 5505 break; 5506 case 0: 5507 default: 5508 reg_data |= IFE_PMC_AUTO_MDIX; 5509 break; 5510 } 5511 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, 5512 reg_data); 5513 if (ret_val) 5514 return ret_val; 5515 break; 5516 default: 5517 break; 5518 } 5519 5520 return e1000_setup_copper_link_generic(hw); 5521 } 5522 5523 /** 5524 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface 5525 * @hw: pointer to the HW structure 5526 * 5527 * Calls the PHY specific link setup function and then calls the 5528 * generic setup_copper_link to finish configuring the link for 5529 * Lynxpoint PCH devices 5530 **/ 5531 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) 5532 { 5533 u32 ctrl; 5534 s32 ret_val; 5535 5536 DEBUGFUNC("e1000_setup_copper_link_pch_lpt"); 5537 5538 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5539 ctrl |= E1000_CTRL_SLU; 5540 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 5541 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5542 5543 ret_val = e1000_copper_link_setup_82577(hw); 5544 if (ret_val) 5545 return ret_val; 5546 5547 return e1000_setup_copper_link_generic(hw); 5548 } 5549 5550 /** 5551 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex 5552 * @hw: pointer to the HW structure 5553 * @speed: pointer to store current link speed 5554 * @duplex: pointer to store the current link duplex 5555 * 5556 * Calls the generic get_speed_and_duplex to retrieve the current link 5557 * information and then calls the Kumeran lock loss workaround for links at 5558 * gigabit speeds. 5559 **/ 5560 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, 5561 u16 *duplex) 5562 { 5563 s32 ret_val; 5564 5565 DEBUGFUNC("e1000_get_link_up_info_ich8lan"); 5566 5567 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); 5568 if (ret_val) 5569 return ret_val; 5570 5571 if ((hw->mac.type == e1000_ich8lan) && 5572 (hw->phy.type == e1000_phy_igp_3) && 5573 (*speed == SPEED_1000)) { 5574 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); 5575 } 5576 5577 return ret_val; 5578 } 5579 5580 /** 5581 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround 5582 * @hw: pointer to the HW structure 5583 * 5584 * Work-around for 82566 Kumeran PCS lock loss: 5585 * On link status change (i.e. 
/**
 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 * @hw: pointer to the HW structure
 *
 * Work-around for 82566 Kumeran PCS lock loss:
 * On link status change (i.e. PCI reset, speed change) and link is up and
 * speed is gigabit:
 * 0) if the workaround has been disabled, do nothing
 * 1) wait 1ms for Kumeran link to come up
 * 2) check Kumeran Diagnostic register PCS lock loss bit
 * 3) if not set the link is locked (all is good), otherwise...
 * 4) reset the PHY
 * 5) repeat up to 10 times
 * Note: this is only called for IGP3 copper when speed is 1gb.
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return E1000_SUCCESS;

	/* Make sure link is up before proceeding.  If not, just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return E1000_SUCCESS;

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return E1000_SUCCESS;

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* Disable GigE link negotiation */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/* Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}
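
/* Usage sketch (illustrative only): the workaround above is gated by a
 * device-specific flag, which init code can toggle through the public
 * setter defined just below.
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
 */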
/**
 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 * @hw: pointer to the HW structure
 * @state: boolean value used to set the current Kumeran workaround state
 *
 * If ICH8, set the current Kumeran workaround state (enabled - TRUE /
 * disabled - FALSE).
 **/
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						 bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");

	if (hw->mac.type != e1000_ich8lan) {
		DEBUGOUT("Workaround applies to ICH8 only.\n");
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;

	return;
}

/**
 * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 * @hw: pointer to the HW structure
 *
 * Workaround for 82566 power-down on D3 entry:
 * 1) disable gigabit link
 * 2) write VR power-down enable
 * 3) read it back
 * Continue if successful, else issue LCD reset and repeat
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
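
/* Call-site sketch (illustrative only): a driver's D3/suspend path for an
 * IGP3 part would typically run the power-down workaround right before
 * powering down the PHY, e.g.:
 *
 *	e1000_igp3_phy_powerdown_workaround_ich8lan(hw);
 */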
/**
 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 * @hw: pointer to the HW structure
 *
 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
 * LPLU, Gig disable, MDIC PHY reset):
 * 1) Set Kumeran Near-end loopback
 * 2) Clear Kumeran Near-end loopback
 * Should only be called for ICH8[m] devices with any 1G Phy.
 **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data = 0;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type == e1000_phy_ife))
		return;

	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		return;
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				     reg_data);
}
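
/* Usage sketch (illustrative only): the downshift workaround is a plain
 * set-then-clear toggle, so callers simply invoke it after any event that
 * drops the link from 1Gb/s, guarded by the MAC type, e.g.:
 *
 *	if (hw->mac.type == e1000_ich8lan)
 *		e1000_gig_downshift_workaround_ich8lan(hw);
 */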
/**
 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 * @hw: pointer to the HW structure
 *
 * During S0 to Sx transition, it is possible the link remains at gig
 * instead of negotiating to a lower speed.  Before going to Sx, set
 * 'Gig Disable' to force link speed negotiation to a lower speed based on
 * the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 * needs to be written.
 * Parts that support (and are linked to a partner that supports) EEE at
 * 100Mbps should disable LPLU, since 100Mbps w/ EEE requires less power
 * than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
		    (hw->mac.type >= e1000_pch_spt)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, FALSE);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}
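
/* Usage note (illustrative only): suspend/resume handlers pair the two
 * workaround routines around the Sx transition -
 * e1000_suspend_workarounds_ich8lan() on the way into Sx, and
 * e1000_resume_workarounds_pchlan() (below) on the way back to S0.
 */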
/**
 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 * @hw: pointer to the HW structure
 *
 * During Sx to S0 transitions on non-managed devices or managed devices
 * on which PHY resets are not blocked, if the PHY registers cannot be
 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
 * the PHY.
 * On i217, setup Intel Rapid Start Technology.
 **/
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return ret_val;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return ret_val;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
		return ret_val;
	}
	return E1000_SUCCESS;
}

/**
 * e1000_cleanup_led_ich8lan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
					     0);

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
}

/**
 * e1000_led_on_ich8lan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_on_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
	return E1000_SUCCESS;
}
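
/* Usage sketch (illustrative only): an "identify adapter" blink built on
 * the MAC ops table; led_on/led_off are the same hooks these routines are
 * installed into, and msec_delay() is the OS-dependent delay used
 * throughout this code.
 *
 *	hw->mac.ops.led_on(hw);
 *	msec_delay(500);
 *	hw->mac.ops.led_off(hw);
 */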
/**
 * e1000_led_off_ich8lan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_off_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
	return E1000_SUCCESS;
}

/**
 * e1000_setup_led_pchlan - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_setup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_mode1);
}

/**
 * e1000_cleanup_led_pchlan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_default);
}

/**
 * e1000_led_on_pchlan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	DEBUGFUNC("e1000_led_on_pchlan");

	/* If no link, then turn LED on by setting the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode2.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 * e1000_led_off_pchlan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	DEBUGFUNC("e1000_led_off_pchlan");

	/* If no link, then turn LED off by clearing the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode1.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}
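
/* Layout note (descriptive, inferred from the masks and shifts used above):
 * each of the three LEDs occupies a 5-bit field in HV_LED_CONFIG, so
 * (data >> (i * 5)) & E1000_PHY_LED0_MASK isolates LED i's configuration.
 * The low mode bits select the blink source (e.g. "link_up"), and
 * E1000_PHY_LED0_IVRT inverts the pin, which is how a "link_up" LED can be
 * forced on or off while no link is present.
 */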
/**
 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 * @hw: pointer to the HW structure
 *
 * Read appropriate register for the config done bit for completion status
 * and configure the PHY through s/w for EEPROM-less parts.
 *
 * NOTE: some EEPROM-less silicon will fail trying to read the config done
 * bit, so only an error is logged and execution continues.  If we were to
 * return with an error, EEPROM-less silicon would not be able to be reset
 * or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error.  This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during
 * a driver unload, or when wake on LAN is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}
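
/* Call-site note (descriptive, assumed from the usual init flow): the
 * counter-clear routine below is normally installed as
 * hw->mac.ops.clear_hw_cntrs and invoked at the end of hardware init, so
 * the statistics registers start from a known state after a reset.
 */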
/**
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}

/**
 * e1000_configure_k0s_lpt - Configure K0s power state
 * @hw: pointer to the HW structure
 * @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
 *	0 corresponds to 128ns, each value over 0 doubles the duration.
 * @min_time: Minimum Tx idle period allowed - valid values are 0 to 4.
 *	0 corresponds to 128ns, each value over 0 doubles the duration.
 *
 * Configure the K0s power state based on the provided parameters.
 * Assumes semaphore already acquired.
 *
 * Success returns 0, Failure returns:
 *	-E1000_ERR_PHY (-2) in case of access error
 *	-E1000_ERR_PARAM (-4) in case of parameter error
 **/
s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
{
	s32 ret_val;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k0s_lpt");

	if (entry_latency > 3 || min_time > 4)
		return -E1000_ERR_PARAM;

	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
					     &kmrn_reg);
	if (ret_val)
		return ret_val;

	/* for now don't touch the latency */
	kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
	kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));

	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	return E1000_SUCCESS;
}
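
/* Usage sketch (illustrative only): with the SW/FW semaphore already held,
 * a caller could raise the minimum Tx idle period to 256ns - min_time = 1,
 * i.e. one doubling of the 128ns base - while passing entry_latency = 0
 * (the function currently leaves the latency field untouched anyway):
 *
 *	ret_val = e1000_configure_k0s_lpt(hw, 0, 1);
 */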