/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000_api.h"

static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
			       u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					    u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					   u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					  u32 offset, u8 *data);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 *data);
static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					    u32 *data);
static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
					   u32 offset, u32 *data);
static s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
					     u32 offset, u32 data);
static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
						  u32 offset, u32 dword);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					  u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						 u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 0:7 GbE region Read Access */
		u32 grwa:8; /* 8:15 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u32 regval;	/* 32 bits wide, to cover all four access fields */
};
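
/*
 * Usage sketch (illustrative, not part of the driver flow): these unions
 * let the flash helpers later in this file view a raw register word as
 * named bit-fields.  For example, checking whether a flash cycle is still
 * in progress looks like:
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcinprog)
 *		(wait and poll again)
 */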

/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with the known
 *  ID, otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}
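	/* Illustrative decode: if PHY_ID1 returned 0x0154 and PHY_ID2
	 * returned 0x2B91 (hypothetical values), the loop above would
	 * assemble phy_id = 0x01542B90, with the low four bits of PHY_ID2
	 * (the revision, here 1) masked off by PHY_REVISION_MASK.
	 */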

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return FALSE;
out:
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Only unforce SMBus if ME is not active */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		}
	}

	return TRUE;
}

/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		msec_delay(50);
	} else {
		u16 count = 20;

		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}

/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, TRUE);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for the MAC to finish any retries
		 * it might be performing from previous attempts to
		 * acknowledge PHY read requests.
		 */
		msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.set_page	= e1000_set_page_igp;
	phy->ops.read_reg	= e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg	= e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_pch_tgp:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg	= e1000_read_phy_reg_igp;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg	= e1000_write_phy_reg_igp;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg  = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type >= e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		nvm_size =
		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
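		/* Illustrative decode, assuming the usual 4 KB
		 * NVM_SIZE_MULTIPLIER: a strap field of 7 encodes
		 * (7 + 1) * 4096 = 32 KB of NVM, i.e. two 16 KB banks.
		 */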
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			DEBUGOUT("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
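		/* Illustrative decode: a GFPREG of 0x000A0001 yields
		 * sector_base_addr 1 and sector_end_addr 0xB, i.e. a
		 * 40 KB flash region at byte offset 0x1000 holding two
		 * 20 KB (10 K-word) NVM banks.
		 */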

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
				       << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
	nvm->ops.release	= e1000_release_nvm_ich8lan;
	if (hw->mac.type >= e1000_pch_spt) {
		nvm->ops.read	= e1000_read_nvm_spt;
		nvm->ops.update	= e1000_update_nvm_checksum_spt;
	} else {
		nvm->ops.read	= e1000_read_nvm_ich8lan;
		nvm->ops.update	= e1000_update_nvm_checksum_ich8lan;
	}
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write		= e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
		/* fall-through */
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if (mac->type >= e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
		    e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

	return E1000_SUCCESS;
}

/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_emi_reg_locked");

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
						      data);
	else
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       *data);

	return ret_val;
}

/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	DEBUGFUNC("e1000_read_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
}

/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	DEBUGFUNC("e1000_write_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
}
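
/*
 * Typical locked-EMI usage (sketch), mirroring e1000_set_eee_pchlan()
 * below: with the SW/FW/HW semaphore held via hw->phy.ops.acquire(),
 * read-modify-write an EMI register:
 *
 *	u16 val;
 *
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &val);
 *	if (!ret_val)
 *		ret_val = e1000_write_emi_reg_locked(hw,
 *		    I82579_LPI_PLL_SHUT, val & ~I82579_LPI_100_PLL_SHUT);
 */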

/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	DEBUGFUNC("e1000_set_eee_pchlan");

	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return E1000_SUCCESS;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		if ((hw->phy.revision > 5) || !link ||
		    ((status & E1000_STATUS_SPEED_100) &&
		     (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

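/**
 *  e1000_ltr2ns - Convert an LTR encoding to nanoseconds
 *  @ltr: encoded latency tolerance value (10-bit value plus 3-bit scale)
 *
 *  Decodes value * 2^(5 * scale); e.g. an encoding with value 5 and
 *  scale 2 represents 5 * 2^10 = 5120 ns.
 **/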
static u64 e1000_ltr2ns(u16 ltr)
{
	u32 value, scale;

	/* Determine the latency in nsec based on the LTR value & scale */
	value = ltr & E1000_LTRV_VALUE_MASK;
	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

	return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
}

/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  setting the SEND bit to send an Intel On-chip System Fabric sideband
 *  (IOSF-SB) message to the PMC.
 *
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
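		/* Worked example (illustrative): with rxa = 24 (KB), a
		 * 1522-byte max frame and a 1000 Mbps link,
		 * lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000
		 *        = 172256 ns, which encodes as scale 2 (2^10 ns
		 * units) and value 169, i.e. lat_enc = 0x8A9.
		 */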

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode. This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}

/**
 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 *  @hw: pointer to the HW structure
 *  @itr: interrupt throttling rate
 *
 *  Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
	u32 svcr;
	s32 timer;

	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;
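	/* For example, assuming the ITR granularity (E1000_ITR_MULT) is
	 * 256 ns per unit: an itr of 1000 yields (1000 * 256) / 1000 =
	 * 256 usec for the OBFF timer.
	 */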

	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;
	}

	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
}

/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;
	u16 oem_reg = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			 (E1000_READ_REG(hw, E1000_FEXT) &
			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			 i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
	 * LPLU and disable Gig speed when entering ULP
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
						       &oem_reg);
		if (ret_val)
			goto release;

		phy_reg = oem_reg;
		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							phy_reg);

		if (ret_val)
			goto release;
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
		else
			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							oem_reg);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @force: boolean indicating whether or not to force disabling ULP
 *
 *  Un-configure ULP mode when link is up, the system is transitioned from
 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 *  system, poll for an indication from ME that ULP has been un-configured.
 *  If not on an ME enabled system, un-configure the ULP mode by software.
 *
 *  During nominal operation, this function is called when link is acquired
 *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
 *  the driver or during Sx->S0 transitions, this is called with force=TRUE
 *  to forcibly disable ULP.
 */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 30) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}

/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
1544  *  change in link status has been detected, then we read the PHY registers
1545  *  to get the current speed/duplex if link exists.
1546  **/
1547 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1548 {
1549 	struct e1000_mac_info *mac = &hw->mac;
1550 	s32 ret_val, tipg_reg = 0;
1551 	u16 emi_addr, emi_val = 0;
1552 	bool link;
1553 	u16 phy_reg;
1554 
1555 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1556 
1557 	/* We only want to go out to the PHY registers to see if Auto-Neg
1558 	 * has completed and/or if our link status has changed.  The
1559 	 * get_link_status flag is set upon receiving a Link Status
1560 	 * Change or Rx Sequence Error interrupt.
1561 	 */
1562 	if (!mac->get_link_status)
1563 		return E1000_SUCCESS;
1564 
1565 	/* First we want to see if the MII Status Register reports
1566 	 * link.  If so, then we want to get the current speed/duplex
1567 	 * of the PHY.
1568 	 */
1569 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1570 	if (ret_val)
1571 		return ret_val;
1572 
1573 	if (hw->mac.type == e1000_pchlan) {
1574 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1575 		if (ret_val)
1576 			return ret_val;
1577 	}
1578 
1579 	/* When connected at 10Mbps half-duplex, some parts are excessively
1580 	 * aggressive resulting in many collisions. To avoid this, increase
1581 	 * the IPG and reduce Rx latency in the PHY.
1582 	 */
1583 	if ((hw->mac.type >= e1000_pch2lan) && link) {
1584 		u16 speed, duplex;
1585 
1586 		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1587 		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1588 		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1589 
1590 		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1591 			tipg_reg |= 0xFF;
1592 			/* Reduce Rx latency in analog PHY */
1593 			emi_val = 0;
1594 		} else if (hw->mac.type >= e1000_pch_spt &&
1595 			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
1596 			tipg_reg |= 0xC;
1597 			emi_val = 1;
1598 		} else {
1599 			/* Restore the default values */
1600 			tipg_reg |= 0x08;
1601 			emi_val = 1;
1602 		}
1603 
1604 		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1605 
1606 		ret_val = hw->phy.ops.acquire(hw);
1607 		if (ret_val)
1608 			return ret_val;
1609 
1610 		if (hw->mac.type == e1000_pch2lan)
1611 			emi_addr = I82579_RX_CONFIG;
1612 		else
1613 			emi_addr = I217_RX_CONFIG;
1614 		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1615 
1616 		if (hw->mac.type >= e1000_pch_lpt) {
1617 			u16 phy_reg;
1618 
1619 			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1620 						    &phy_reg);
1621 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1622 			if (speed == SPEED_100 || speed == SPEED_10)
1623 				phy_reg |= 0x3E8;
1624 			else
1625 				phy_reg |= 0xFA;
1626 			hw->phy.ops.write_reg_locked(hw,
1627 						     I217_PLL_CLOCK_GATE_REG,
1628 						     phy_reg);
1629 
1630 			if (speed == SPEED_1000) {
1631 				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1632 							    &phy_reg);
1633 
1634 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1635 
1636 				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1637 							     phy_reg);
1638 			}
1639 		}
1640 		hw->phy.ops.release(hw);
1641 
1642 		if (ret_val)
1643 			return ret_val;
1644 
1645 		if (hw->mac.type >= e1000_pch_spt) {
1646 			u16 data;
1647 			u16 ptr_gap;
1648 
1649 			if (speed == SPEED_1000) {
1650 				ret_val = hw->phy.ops.acquire(hw);
1651 				if (ret_val)
1652 					return ret_val;
1653 
1654 				ret_val = hw->phy.ops.read_reg_locked(hw,
1655 							      PHY_REG(776, 20),
1656 							      &data);
1657 				if (ret_val) {
1658 					hw->phy.ops.release(hw);
1659 					return ret_val;
1660 				}
1661 
1662 				ptr_gap = (data & (0x3FF << 2)) >> 2;
1663 				if (ptr_gap < 0x18) {
1664 					data &= ~(0x3FF << 2);
1665 					data |= (0x18 << 2);
1666 					ret_val =
1667 						hw->phy.ops.write_reg_locked(hw,
1668 							PHY_REG(776, 20), data);
1669 				}
1670 				hw->phy.ops.release(hw);
1671 				if (ret_val)
1672 					return ret_val;
1673 			} else {
1674 				ret_val = hw->phy.ops.acquire(hw);
1675 				if (ret_val)
1676 					return ret_val;
1677 
1678 				ret_val = hw->phy.ops.write_reg_locked(hw,
1679 							     PHY_REG(776, 20),
1680 							     0xC023);
1681 				hw->phy.ops.release(hw);
1682 				if (ret_val)
1683 					return ret_val;
1684 
1685 			}
1686 		}
1687 	}
1688 
1689 	/* I217 Packet Loss issue:
1690 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1691 	 * on power up.
1692 	 * Set the Beacon Duration for I217 to 8 usec
1693 	 */
1694 	if (hw->mac.type >= e1000_pch_lpt) {
1695 		u32 mac_reg;
1696 
1697 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1698 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1699 		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1700 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1701 	}
1702 
1703 	/* Work-around I218 hang issue */
1704 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1705 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1706 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1707 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1708 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1709 		if (ret_val)
1710 			return ret_val;
1711 	}
1712 	if (hw->mac.type >= e1000_pch_lpt) {
1713 		/* Set platform power management values for
1714 		 * Latency Tolerance Reporting (LTR)
1715 		 * Optimized Buffer Flush/Fill (OBFF)
1716 		 */
1717 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1718 		if (ret_val)
1719 			return ret_val;
1720 	}
1721 
1722 	/* Clear link partner's EEE ability */
1723 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1724 
1725 	/* FEXTNVM6 K1-off workaround - for SPT only */
1726 	if (hw->mac.type == e1000_pch_spt) {
1727 		u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1728 		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1729 
1730 		if ((pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) &&
1731 			(hw->dev_spec.ich8lan.disable_k1_off == FALSE))
1732 			fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1733 		else
1734 			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1735 
1736 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1737 	}
1738 
1739 	if (!link)
1740 		return E1000_SUCCESS; /* No link detected */
1741 
1742 	mac->get_link_status = FALSE;
1743 
1744 	switch (hw->mac.type) {
1745 	case e1000_pch2lan:
1746 		ret_val = e1000_k1_workaround_lv(hw);
1747 		if (ret_val)
1748 			return ret_val;
1749 		/* fall-thru */
1750 	case e1000_pchlan:
1751 		if (hw->phy.type == e1000_phy_82578) {
1752 			ret_val = e1000_link_stall_workaround_hv(hw);
1753 			if (ret_val)
1754 				return ret_val;
1755 		}
1756 
1757 		/* Workaround for PCHx parts in half-duplex:
1758 		 * Set the number of preambles removed from the packet
1759 		 * when it is passed from the PHY to the MAC to prevent
1760 		 * the MAC from misinterpreting the packet type.
1761 		 */
1762 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1763 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1764 
1765 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1766 		    E1000_STATUS_FD)
1767 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1768 
1769 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1770 		break;
1771 	default:
1772 		break;
1773 	}
1774 
1775 	/* Check if there was DownShift; this must be checked
1776 	 * immediately after link-up
1777 	 */
1778 	e1000_check_downshift_generic(hw);
1779 
1780 	/* Enable/Disable EEE after link up */
1781 	if (hw->phy.type > e1000_phy_82579) {
1782 		ret_val = e1000_set_eee_pchlan(hw);
1783 		if (ret_val)
1784 			return ret_val;
1785 	}
1786 
1787 	/* If we are forcing speed/duplex, then we simply return since
1788 	 * we have already determined whether we have link or not.
1789 	 */
1790 	if (!mac->autoneg)
1791 		return -E1000_ERR_CONFIG;
1792 
1793 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1794 	 * of MAC speed/duplex configuration.  So we only need to
1795 	 * configure Collision Distance in the MAC.
1796 	 */
1797 	mac->ops.config_collision_dist(hw);
1798 
1799 	/* Configure Flow Control now that Auto-Neg has completed.
1800 	 * First, we need to restore the desired flow control
1801 	 * settings because we may have had to re-autoneg with a
1802 	 * different link partner.
1803 	 */
1804 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1805 	if (ret_val)
1806 		DEBUGOUT("Error configuring flow control\n");
1807 
1808 	return ret_val;
1809 }
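
/*
 * Illustrative only, not part of the driver: this routine is reached
 * through the MAC ops table, so a hypothetical watchdog path would look
 * roughly like
 *
 *	if (hw->mac.ops.check_for_link(hw) == E1000_SUCCESS &&
 *	    !hw->mac.get_link_status)
 *		... link is up and speed/duplex have been resolved ...
 */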
1810 
1811 /**
1812  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1813  *  @hw: pointer to the HW structure
1814  *
1815  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1816  **/
1817 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1818 {
1819 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1820 
1821 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1822 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1823 	switch (hw->mac.type) {
1824 	case e1000_ich8lan:
1825 	case e1000_ich9lan:
1826 	case e1000_ich10lan:
1827 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1828 		break;
1829 	case e1000_pchlan:
1830 	case e1000_pch2lan:
1831 	case e1000_pch_lpt:
1832 	case e1000_pch_spt:
1833 	case e1000_pch_cnp:
1834 	case e1000_pch_tgp:
1835 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1836 		break;
1837 	default:
1838 		break;
1839 	}
1840 }
1841 
1842 /**
1843  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1844  *  @hw: pointer to the HW structure
1845  *
1846  *  Acquires the mutex for performing NVM operations.
1847  **/
1848 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1849 {
1850 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1851 
1852 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1853 
1854 	return E1000_SUCCESS;
1855 }
1856 
1857 /**
1858  *  e1000_release_nvm_ich8lan - Release NVM mutex
1859  *  @hw: pointer to the HW structure
1860  *
1861  *  Releases the mutex used while performing NVM operations.
1862  **/
1863 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1864 {
1865 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1866 
1867 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1868 
1869 	return;
1870 }
1871 
1872 /**
1873  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1874  *  @hw: pointer to the HW structure
1875  *
1876  *  Acquires the software control flag for performing PHY and select
1877  *  MAC CSR accesses.
1878  **/
1879 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1880 {
1881 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1882 	s32 ret_val = E1000_SUCCESS;
1883 
1884 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1885 
1886 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1887 
1888 	while (timeout) {
1889 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1890 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1891 			break;
1892 
1893 		msec_delay_irq(1);
1894 		timeout--;
1895 	}
1896 
1897 	if (!timeout) {
1898 		DEBUGOUT("SW has already locked the resource.\n");
1899 		ret_val = -E1000_ERR_CONFIG;
1900 		goto out;
1901 	}
1902 
1903 	timeout = SW_FLAG_TIMEOUT;
1904 
1905 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1906 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1907 
1908 	while (timeout) {
1909 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1910 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1911 			break;
1912 
1913 		msec_delay_irq(1);
1914 		timeout--;
1915 	}
1916 
1917 	if (!timeout) {
1918 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1919 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1920 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1921 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1922 		ret_val = -E1000_ERR_CONFIG;
1923 		goto out;
1924 	}
1925 
1926 out:
1927 	if (ret_val)
1928 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1929 
1930 	return ret_val;
1931 }
1932 
1933 /**
1934  *  e1000_release_swflag_ich8lan - Release software control flag
1935  *  @hw: pointer to the HW structure
1936  *
1937  *  Releases the software control flag for performing PHY and select
1938  *  MAC CSR accesses.
1939  **/
1940 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1941 {
1942 	u32 extcnf_ctrl;
1943 
1944 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1945 
1946 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1947 
1948 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1949 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1950 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1951 	} else {
1952 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1953 	}
1954 
1955 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1956 
1957 	return;
1958 }
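
/*
 * Usage sketch (illustrative, not new driver logic): the software flag
 * must bracket every PHY or gated-CSR access sequence, e.g.
 *
 *	ret_val = e1000_acquire_swflag_ich8lan(hw);
 *	if (ret_val)
 *		return ret_val;
 *	... touch PHY registers or gated MAC CSRs ...
 *	e1000_release_swflag_ich8lan(hw);
 *
 * Note that the acquire routine already drops swflag_mutex on failure,
 * so the caller must not call the release routine in that case.
 */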
1959 
1960 /**
1961  *  e1000_check_mng_mode_ich8lan - Checks management mode
1962  *  @hw: pointer to the HW structure
1963  *
1964  *  This checks if the adapter has any manageability enabled.
1965  *  This is a function pointer entry point only called by read/write
1966  *  routines for the PHY and NVM parts.
1967  **/
1968 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1969 {
1970 	u32 fwsm;
1971 
1972 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1973 
1974 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1975 
1976 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1977 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1978 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1979 }
1980 
1981 /**
1982  *  e1000_check_mng_mode_pchlan - Checks management mode
1983  *  @hw: pointer to the HW structure
1984  *
1985  *  This checks if the adapter has iAMT enabled.
1986  *  This is a function pointer entry point only called by read/write
1987  *  routines for the PHY and NVM parts.
1988  **/
1989 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1990 {
1991 	u32 fwsm;
1992 
1993 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1994 
1995 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1996 
1997 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1998 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1999 }
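
/*
 * Decode sketch for the two checks above (illustrative): both require
 * E1000_ICH_FWSM_FW_VALID, but the ICH8 variant demands that the FWSM
 * mode field equal the iAMT mode exactly, while the PCH variant only
 * tests the iAMT mode bit:
 *
 *	ich8: (fwsm & E1000_FWSM_MODE_MASK) ==
 *		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)
 *	pch:   fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)
 */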
2000 
2001 /**
2002  *  e1000_rar_set_pch2lan - Set receive address register
2003  *  @hw: pointer to the HW structure
2004  *  @addr: pointer to the receive address
2005  *  @index: receive address array register
2006  *
2007  *  Sets the receive address array register at index to the address passed
2008  *  in by addr.  For 82579, RAR[0] is the base address register that is to
2009  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
2010  *  Use SHRA[0-3] in place of those reserved for ME.
2011  **/
2012 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2013 {
2014 	u32 rar_low, rar_high;
2015 
2016 	DEBUGFUNC("e1000_rar_set_pch2lan");
2017 
2018 	/* HW expects these in little endian so we reverse the byte order
2019 	 * from network order (big endian) to little endian
2020 	 */
2021 	rar_low = ((u32) addr[0] |
2022 		   ((u32) addr[1] << 8) |
2023 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2024 
2025 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2026 
2027 	/* If MAC address zero, no need to set the AV bit */
2028 	if (rar_low || rar_high)
2029 		rar_high |= E1000_RAH_AV;
2030 
2031 	if (index == 0) {
2032 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2033 		E1000_WRITE_FLUSH(hw);
2034 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2035 		E1000_WRITE_FLUSH(hw);
2036 		return E1000_SUCCESS;
2037 	}
2038 
2039 	/* RAR[1-6] are owned by manageability.  Skip those and program the
2040 	 * next address into the SHRA register array.
2041 	 */
2042 	if (index < (u32) (hw->mac.rar_entry_count)) {
2043 		s32 ret_val;
2044 
2045 		ret_val = e1000_acquire_swflag_ich8lan(hw);
2046 		if (ret_val)
2047 			goto out;
2048 
2049 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2050 		E1000_WRITE_FLUSH(hw);
2051 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2052 		E1000_WRITE_FLUSH(hw);
2053 
2054 		e1000_release_swflag_ich8lan(hw);
2055 
2056 		/* verify the register updates */
2057 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2058 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2059 			return E1000_SUCCESS;
2060 
2061 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2062 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2063 	}
2064 
2065 out:
2066 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2067 	return -E1000_ERR_CONFIG;
2068 }
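
/*
 * Packing example (illustrative): for addr = 00:1B:21:AA:BB:CC the
 * byte swizzling above produces
 *
 *	rar_low  = 0xAA211B00
 *	rar_high = 0x0000CCBB (plus E1000_RAH_AV once marked valid)
 */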
2069 
2070 /**
2071  *  e1000_rar_set_pch_lpt - Set receive address registers
2072  *  @hw: pointer to the HW structure
2073  *  @addr: pointer to the receive address
2074  *  @index: receive address array register
2075  *
2076  *  Sets the receive address register array at index to the address passed
2077  *  in by addr. For LPT, RAR[0] is the base address register that is to
2078  *  contain the MAC address. SHRA[0-10] are the shared receive address
2079  *  registers that are shared between the Host and manageability engine (ME).
2080  **/
2081 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2082 {
2083 	u32 rar_low, rar_high;
2084 	u32 wlock_mac;
2085 
2086 	DEBUGFUNC("e1000_rar_set_pch_lpt");
2087 
2088 	/* HW expects these in little endian so we reverse the byte order
2089 	 * from network order (big endian) to little endian
2090 	 */
2091 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2092 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2093 
2094 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2095 
2096 	/* If MAC address zero, no need to set the AV bit */
2097 	if (rar_low || rar_high)
2098 		rar_high |= E1000_RAH_AV;
2099 
2100 	if (index == 0) {
2101 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2102 		E1000_WRITE_FLUSH(hw);
2103 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2104 		E1000_WRITE_FLUSH(hw);
2105 		return E1000_SUCCESS;
2106 	}
2107 
2108 	/* The manageability engine (ME) can lock certain SHRAR registers that
2109 	 * it is using - those registers are unavailable for use.
2110 	 */
2111 	if (index < hw->mac.rar_entry_count) {
2112 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2113 			    E1000_FWSM_WLOCK_MAC_MASK;
2114 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2115 
2116 		/* Check if all SHRAR registers are locked */
2117 		if (wlock_mac == 1)
2118 			goto out;
2119 
2120 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2121 			s32 ret_val;
2122 
2123 			ret_val = e1000_acquire_swflag_ich8lan(hw);
2124 
2125 			if (ret_val)
2126 				goto out;
2127 
2128 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2129 					rar_low);
2130 			E1000_WRITE_FLUSH(hw);
2131 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2132 					rar_high);
2133 			E1000_WRITE_FLUSH(hw);
2134 
2135 			e1000_release_swflag_ich8lan(hw);
2136 
2137 			/* verify the register updates */
2138 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2139 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2140 				return E1000_SUCCESS;
2141 		}
2142 	}
2143 
2144 out:
2145 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2146 	return -E1000_ERR_CONFIG;
2147 }
2148 
2149 /**
2150  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2151  *  @hw: pointer to the HW structure
2152  *  @mc_addr_list: array of multicast addresses to program
2153  *  @mc_addr_count: number of multicast addresses to program
2154  *
2155  *  Updates the entire Multicast Table Array of the PCH2 MAC and PHY.
2156  *  The caller must have a packed mc_addr_list of multicast addresses.
2157  **/
2158 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2159 					      u8 *mc_addr_list,
2160 					      u32 mc_addr_count)
2161 {
2162 	u16 phy_reg = 0;
2163 	int i;
2164 	s32 ret_val;
2165 
2166 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2167 
2168 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2169 
2170 	ret_val = hw->phy.ops.acquire(hw);
2171 	if (ret_val)
2172 		return;
2173 
2174 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2175 	if (ret_val)
2176 		goto release;
2177 
2178 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2179 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2180 					   (u16)(hw->mac.mta_shadow[i] &
2181 						 0xFFFF));
2182 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2183 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2184 						 0xFFFF));
2185 	}
2186 
2187 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2188 
2189 release:
2190 	hw->phy.ops.release(hw);
2191 }
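
/*
 * Layout sketch (from the loop above): each 32-bit MTA shadow word is
 * split into two 16-bit PHY wakeup-register writes,
 *
 *	BM_MTA(i)     <- mta_shadow[i] & 0xFFFF          (low word)
 *	BM_MTA(i) + 1 <- (mta_shadow[i] >> 16) & 0xFFFF  (high word)
 */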
2192 
2193 /**
2194  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2195  *  @hw: pointer to the HW structure
2196  *
2197  *  Checks if firmware is blocking the reset of the PHY.
2198  *  This is a function pointer entry point only called by
2199  *  reset routines.
2200  **/
2201 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2202 {
2203 	u32 fwsm;
2204 	bool blocked = FALSE;
2205 	int i = 0;
2206 
2207 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2208 
2209 	do {
2210 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2211 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2212 			blocked = TRUE;
2213 			msec_delay(10);
2214 			continue;
2215 		}
2216 		blocked = FALSE;
2217 	} while (blocked && (i++ < 30));
2218 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2219 }
2220 
2221 /**
2222  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2223  *  @hw: pointer to the HW structure
2224  *
2225  *  Assumes semaphore already acquired.
2227  **/
2228 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2229 {
2230 	u16 phy_data;
2231 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2232 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2233 		E1000_STRAP_SMT_FREQ_SHIFT;
2234 	s32 ret_val;
2235 
2236 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2237 
2238 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2239 	if (ret_val)
2240 		return ret_val;
2241 
2242 	phy_data &= ~HV_SMB_ADDR_MASK;
2243 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2244 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2245 
2246 	if (hw->phy.type == e1000_phy_i217) {
2247 		/* Restore SMBus frequency */
2248 		if (freq--) {
2249 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2250 			phy_data |= (freq & (1 << 0)) <<
2251 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2252 			phy_data |= (freq & (1 << 1)) <<
2253 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2254 		} else {
2255 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2256 		}
2257 	}
2258 
2259 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2260 }
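
/*
 * Strap decode sketch (illustrative): the SMBus address comes straight
 * from the STRAP register, while the I217 frequency field is restored
 * bit by bit.  For example, a strap frequency field of 2 becomes
 * freq = 1 after the post-decrement test, so only the low frequency
 * bit is set:
 *
 *	phy_data |= (freq & (1 << 0)) << HV_SMB_ADDR_FREQ_LOW_SHIFT;
 *	phy_data |= (freq & (1 << 1)) << (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
 */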
2261 
2262 /**
2263  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2264  *  @hw:   pointer to the HW structure
2265  *
2266  *  SW should configure the LCD from the NVM extended configuration region
2267  *  as a workaround for certain parts.
2268  **/
2269 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2270 {
2271 	struct e1000_phy_info *phy = &hw->phy;
2272 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2273 	s32 ret_val = E1000_SUCCESS;
2274 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2275 
2276 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2277 
2278 	/* Initialize the PHY from the NVM on ICH platforms.  This
2279 	 * is needed due to an issue where the NVM configuration is
2280 	 * not properly autoloaded after power transitions.
2281 	 * Therefore, after each PHY reset, we will load the
2282 	 * configuration data out of the NVM manually.
2283 	 */
2284 	switch (hw->mac.type) {
2285 	case e1000_ich8lan:
2286 		if (phy->type != e1000_phy_igp_3)
2287 			return ret_val;
2288 
2289 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2290 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2291 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2292 			break;
2293 		}
2294 		/* Fall-thru */
2295 	case e1000_pchlan:
2296 	case e1000_pch2lan:
2297 	case e1000_pch_lpt:
2298 	case e1000_pch_spt:
2299 	case e1000_pch_cnp:
2300 	case e1000_pch_tgp:
2301 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2302 		break;
2303 	default:
2304 		return ret_val;
2305 	}
2306 
2307 	ret_val = hw->phy.ops.acquire(hw);
2308 	if (ret_val)
2309 		return ret_val;
2310 
2311 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2312 	if (!(data & sw_cfg_mask))
2313 		goto release;
2314 
2315 	/* Make sure HW does not configure LCD from PHY
2316 	 * extended configuration before SW configuration
2317 	 */
2318 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2319 	if ((hw->mac.type < e1000_pch2lan) &&
2320 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2321 		goto release;
2322 
2323 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2324 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2325 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2326 	if (!cnf_size)
2327 		goto release;
2328 
2329 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2330 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2331 
2332 	if (((hw->mac.type == e1000_pchlan) &&
2333 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2334 	    (hw->mac.type > e1000_pchlan)) {
2335 		/* HW configures the SMBus address and LEDs when the
2336 		 * OEM and LCD Write Enable bits are set in the NVM.
2337 		 * When both NVM bits are cleared, SW will configure
2338 		 * them instead.
2339 		 */
2340 		ret_val = e1000_write_smbus_addr(hw);
2341 		if (ret_val)
2342 			goto release;
2343 
2344 		data = E1000_READ_REG(hw, E1000_LEDCTL);
2345 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2346 							(u16)data);
2347 		if (ret_val)
2348 			goto release;
2349 	}
2350 
2351 	/* Configure LCD from extended configuration region. */
2352 
2353 	/* cnf_base_addr is in DWORD */
2354 	word_addr = (u16)(cnf_base_addr << 1);
2355 
2356 	for (i = 0; i < cnf_size; i++) {
2357 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2358 					   &reg_data);
2359 		if (ret_val)
2360 			goto release;
2361 
2362 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2363 					   1, &reg_addr);
2364 		if (ret_val)
2365 			goto release;
2366 
2367 		/* Save off the PHY page for future writes. */
2368 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2369 			phy_page = reg_data;
2370 			continue;
2371 		}
2372 
2373 		reg_addr &= PHY_REG_MASK;
2374 		reg_addr |= phy_page;
2375 
2376 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2377 						    reg_data);
2378 		if (ret_val)
2379 			goto release;
2380 	}
2381 
2382 release:
2383 	hw->phy.ops.release(hw);
2384 	return ret_val;
2385 }
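
/*
 * Region layout consumed by the loop above (sketch): the extended
 * configuration area is a list of word pairs starting at word_addr,
 *
 *	word[word_addr + 2*i]     = PHY register data
 *	word[word_addr + 2*i + 1] = PHY register address
 *
 * where IGP01E1000_PHY_PAGE_SELECT entries only update the page that is
 * OR-ed into subsequent register addresses.
 */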
2386 
2387 /**
2388  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2389  *  @hw:   pointer to the HW structure
2390  *  @link: link up bool flag
2391  *
2392  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2393  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2394  *  If link is down, the function will restore the default K1 setting located
2395  *  in the NVM.
2396  **/
2397 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2398 {
2399 	s32 ret_val = E1000_SUCCESS;
2400 	u16 status_reg = 0;
2401 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2402 
2403 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2404 
2405 	if (hw->mac.type != e1000_pchlan)
2406 		return E1000_SUCCESS;
2407 
2408 	/* Wrap the whole flow with the sw flag */
2409 	ret_val = hw->phy.ops.acquire(hw);
2410 	if (ret_val)
2411 		return ret_val;
2412 
2413 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2414 	if (link) {
2415 		if (hw->phy.type == e1000_phy_82578) {
2416 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2417 							      &status_reg);
2418 			if (ret_val)
2419 				goto release;
2420 
2421 			status_reg &= (BM_CS_STATUS_LINK_UP |
2422 				       BM_CS_STATUS_RESOLVED |
2423 				       BM_CS_STATUS_SPEED_MASK);
2424 
2425 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2426 					   BM_CS_STATUS_RESOLVED |
2427 					   BM_CS_STATUS_SPEED_1000))
2428 				k1_enable = FALSE;
2429 		}
2430 
2431 		if (hw->phy.type == e1000_phy_82577) {
2432 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2433 							      &status_reg);
2434 			if (ret_val)
2435 				goto release;
2436 
2437 			status_reg &= (HV_M_STATUS_LINK_UP |
2438 				       HV_M_STATUS_AUTONEG_COMPLETE |
2439 				       HV_M_STATUS_SPEED_MASK);
2440 
2441 			if (status_reg == (HV_M_STATUS_LINK_UP |
2442 					   HV_M_STATUS_AUTONEG_COMPLETE |
2443 					   HV_M_STATUS_SPEED_1000))
2444 				k1_enable = FALSE;
2445 		}
2446 
2447 		/* Link stall fix for link up */
2448 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2449 						       0x0100);
2450 		if (ret_val)
2451 			goto release;
2452 
2453 	} else {
2454 		/* Link stall fix for link down */
2455 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2456 						       0x4100);
2457 		if (ret_val)
2458 			goto release;
2459 	}
2460 
2461 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2462 
2463 release:
2464 	hw->phy.ops.release(hw);
2465 
2466 	return ret_val;
2467 }
2468 
2469 /**
2470  *  e1000_configure_k1_ich8lan - Configure K1 power state
2471  *  @hw: pointer to the HW structure
2472  *  @k1_enable: K1 state to configure
2473  *
2474  *  Configure the K1 power state based on the provided parameter.
2475  *  Assumes semaphore already acquired.
2476  *
2477  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2478  **/
2479 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2480 {
2481 	s32 ret_val;
2482 	u32 ctrl_reg = 0;
2483 	u32 ctrl_ext = 0;
2484 	u32 reg = 0;
2485 	u16 kmrn_reg = 0;
2486 
2487 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2488 
2489 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2490 					     &kmrn_reg);
2491 	if (ret_val)
2492 		return ret_val;
2493 
2494 	if (k1_enable)
2495 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2496 	else
2497 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2498 
2499 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2500 					      kmrn_reg);
2501 	if (ret_val)
2502 		return ret_val;
2503 
2504 	usec_delay(20);
2505 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2506 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2507 
2508 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2509 	reg |= E1000_CTRL_FRCSPD;
2510 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2511 
2512 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2513 	E1000_WRITE_FLUSH(hw);
2514 	usec_delay(20);
2515 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2516 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2517 	E1000_WRITE_FLUSH(hw);
2518 	usec_delay(20);
2519 
2520 	return E1000_SUCCESS;
2521 }
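
/*
 * Usage sketch (illustrative): callers hold the PHY semaphore and pass
 * the desired K1 state; e.g. the K1 gig workaround disables K1 on a
 * 1Gbps link with
 *
 *	ret_val = e1000_configure_k1_ich8lan(hw, FALSE);
 *
 * The CTRL/CTRL_EXT toggle at the end briefly forces the MAC speed and
 * bypasses Auto Speed Detection, then restores both registers; this
 * appears to be required for the new K1 setting to take effect.
 */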
2522 
2523 /**
2524  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2525  *  @hw:       pointer to the HW structure
2526  *  @d0_state: boolean if entering d0 or d3 device state
2527  *
2528  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2529  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2530  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2531  **/
2532 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2533 {
2534 	s32 ret_val = 0;
2535 	u32 mac_reg;
2536 	u16 oem_reg;
2537 
2538 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2539 
2540 	if (hw->mac.type < e1000_pchlan)
2541 		return ret_val;
2542 
2543 	ret_val = hw->phy.ops.acquire(hw);
2544 	if (ret_val)
2545 		return ret_val;
2546 
2547 	if (hw->mac.type == e1000_pchlan) {
2548 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2549 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2550 			goto release;
2551 	}
2552 
2553 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2554 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2555 		goto release;
2556 
2557 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2558 
2559 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2560 	if (ret_val)
2561 		goto release;
2562 
2563 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2564 
2565 	if (d0_state) {
2566 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2567 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2568 
2569 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2570 			oem_reg |= HV_OEM_BITS_LPLU;
2571 	} else {
2572 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2573 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2574 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2575 
2576 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2577 		    E1000_PHY_CTRL_NOND0A_LPLU))
2578 			oem_reg |= HV_OEM_BITS_LPLU;
2579 	}
2580 
2581 	/* Set Restart auto-neg to activate the bits */
2582 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2583 	    !hw->phy.ops.check_reset_block(hw))
2584 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2585 
2586 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2587 
2588 release:
2589 	hw->phy.ops.release(hw);
2590 
2591 	return ret_val;
2592 }
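
/*
 * Mapping summary for the code above (sketch): PHY_CTRL bits in the MAC
 * are mirrored into the HV_OEM_BITS PHY register,
 *
 *	D0: GBE_DISABLE                        -> HV_OEM_BITS_GBE_DIS
 *	    D0A_LPLU                           -> HV_OEM_BITS_LPLU
 *	Dx: GBE_DISABLE | NOND0A_GBE_DISABLE   -> HV_OEM_BITS_GBE_DIS
 *	    D0A_LPLU | NOND0A_LPLU             -> HV_OEM_BITS_LPLU
 *
 * with HV_OEM_BITS_RESTART_AN OR-ed in to kick auto-negotiation when a
 * PHY reset is not blocked.
 */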
2593 
2595 /**
2596  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2597  *  @hw:   pointer to the HW structure
2598  **/
2599 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2600 {
2601 	s32 ret_val;
2602 	u16 data;
2603 
2604 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2605 
2606 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2607 	if (ret_val)
2608 		return ret_val;
2609 
2610 	data |= HV_KMRN_MDIO_SLOW;
2611 
2612 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2613 
2614 	return ret_val;
2615 }
2616 
2617 /**
2618  *  e1000_hv_phy_workarounds_ich8lan - PHY workarounds after every PHY reset
2619  *  @hw: pointer to the HW structure
2620  **/
2621 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2622 {
2623 	s32 ret_val = E1000_SUCCESS;
2624 	u16 phy_data;
2625 
2626 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2627 
2628 	if (hw->mac.type != e1000_pchlan)
2629 		return E1000_SUCCESS;
2630 
2631 	/* Set MDIO slow mode before any other MDIO access */
2632 	if (hw->phy.type == e1000_phy_82577) {
2633 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2634 		if (ret_val)
2635 			return ret_val;
2636 	}
2637 
2638 	if (((hw->phy.type == e1000_phy_82577) &&
2639 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2640 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2641 		/* Disable generation of early preamble */
2642 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2643 		if (ret_val)
2644 			return ret_val;
2645 
2646 		/* Preamble tuning for SSC */
2647 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2648 						0xA204);
2649 		if (ret_val)
2650 			return ret_val;
2651 	}
2652 
2653 	if (hw->phy.type == e1000_phy_82578) {
2654 		/* Return registers to default by doing a soft reset then
2655 		 * writing 0x3140 to the control register.
2656 		 */
2657 		if (hw->phy.revision < 2) {
2658 			e1000_phy_sw_reset_generic(hw);
2659 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2660 							0x3140);
2661 		}
2662 	}
2663 
2664 	/* Select page 0 */
2665 	ret_val = hw->phy.ops.acquire(hw);
2666 	if (ret_val)
2667 		return ret_val;
2668 
2669 	hw->phy.addr = 1;
2670 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2671 	hw->phy.ops.release(hw);
2672 	if (ret_val)
2673 		return ret_val;
2674 
2675 	/* Configure the K1 Si workaround during phy reset assuming there is
2676 	 * link so that it disables K1 if link is in 1Gbps.
2677 	 * link so that it disables K1 if the link is at 1Gbps.
2678 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2679 	if (ret_val)
2680 		return ret_val;
2681 
2682 	/* Workaround for link disconnects on a busy hub in half duplex */
2683 	ret_val = hw->phy.ops.acquire(hw);
2684 	if (ret_val)
2685 		return ret_val;
2686 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2687 	if (ret_val)
2688 		goto release;
2689 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2690 					       phy_data & 0x00FF);
2691 	if (ret_val)
2692 		goto release;
2693 
2694 	/* Set MSE higher so the link stays up when noise is high */
2695 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2696 release:
2697 	hw->phy.ops.release(hw);
2698 
2699 	return ret_val;
2700 }
2701 
2702 /**
2703  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2704  *  @hw:   pointer to the HW structure
2705  **/
2706 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2707 {
2708 	u32 mac_reg;
2709 	u16 i, phy_reg = 0;
2710 	s32 ret_val;
2711 
2712 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2713 
2714 	ret_val = hw->phy.ops.acquire(hw);
2715 	if (ret_val)
2716 		return;
2717 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2718 	if (ret_val)
2719 		goto release;
2720 
2721 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2722 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2723 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2724 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2725 					   (u16)(mac_reg & 0xFFFF));
2726 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2727 					   (u16)((mac_reg >> 16) & 0xFFFF));
2728 
2729 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2730 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2731 					   (u16)(mac_reg & 0xFFFF));
2732 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2733 					   (u16)((mac_reg & E1000_RAH_AV)
2734 						 >> 16));
2735 	}
2736 
2737 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2738 
2739 release:
2740 	hw->phy.ops.release(hw);
2741 }
2742 
2743 static u32 e1000_calc_rx_da_crc(u8 mac[])
2744 {
2745 	u32 poly = 0xEDB88320;	/* Reflected 802.3 CRC-32 polynomial */
2746 	u32 i, j, mask, crc;
2747 
2748 	DEBUGFUNC("e1000_calc_rx_da_crc");
2749 
2750 	crc = 0xffffffff;
2751 	for (i = 0; i < 6; i++) {
2752 		crc = crc ^ mac[i];
2753 		for (j = 8; j > 0; j--) {
2754 			mask = (crc & 1) * (-1);
2755 			crc = (crc >> 1) ^ (poly & mask);
2756 		}
2757 	}
2758 	return ~crc;
2759 }
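
/*
 * Equivalence note (illustrative): because mask is all-ones exactly when
 * the CRC's low bit is set, the branch-free update above matches the
 * textbook reflected CRC-32 step
 *
 *	crc = (crc & 1) ? (crc >> 1) ^ poly : (crc >> 1);
 *
 * computed over the 6 destination-address bytes, with the final value
 * inverted.
 */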
2760 
2761 /**
2762  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2763  *  with 82579 PHY
2764  *  @hw: pointer to the HW structure
2765  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2766  **/
2767 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2768 {
2769 	s32 ret_val = E1000_SUCCESS;
2770 	u16 phy_reg, data;
2771 	u32 mac_reg;
2772 	u16 i;
2773 
2774 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2775 
2776 	if (hw->mac.type < e1000_pch2lan)
2777 		return E1000_SUCCESS;
2778 
2779 	/* disable Rx path while enabling/disabling workaround */
2780 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2781 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2782 					phy_reg | (1 << 14));
2783 	if (ret_val)
2784 		return ret_val;
2785 
2786 	if (enable) {
2787 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2788 		 * SHRAL/H) and initial CRC values to the MAC
2789 		 */
2790 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2791 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2792 			u32 addr_high, addr_low;
2793 
2794 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2795 			if (!(addr_high & E1000_RAH_AV))
2796 				continue;
2797 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2798 			mac_addr[0] = (addr_low & 0xFF);
2799 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2800 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2801 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2802 			mac_addr[4] = (addr_high & 0xFF);
2803 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2804 
2805 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2806 					e1000_calc_rx_da_crc(mac_addr));
2807 		}
2808 
2809 		/* Write Rx addresses to the PHY */
2810 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2811 
2812 		/* Enable jumbo frame workaround in the MAC */
2813 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2814 		mac_reg &= ~(1 << 14);
2815 		mac_reg |= (7 << 15);
2816 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2817 
2818 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2819 		mac_reg |= E1000_RCTL_SECRC;
2820 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2821 
2822 		ret_val = e1000_read_kmrn_reg_generic(hw,
2823 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2824 						&data);
2825 		if (ret_val)
2826 			return ret_val;
2827 		ret_val = e1000_write_kmrn_reg_generic(hw,
2828 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2829 						data | (1 << 0));
2830 		if (ret_val)
2831 			return ret_val;
2832 		ret_val = e1000_read_kmrn_reg_generic(hw,
2833 						E1000_KMRNCTRLSTA_HD_CTRL,
2834 						&data);
2835 		if (ret_val)
2836 			return ret_val;
2837 		data &= ~(0xF << 8);
2838 		data |= (0xB << 8);
2839 		ret_val = e1000_write_kmrn_reg_generic(hw,
2840 						E1000_KMRNCTRLSTA_HD_CTRL,
2841 						data);
2842 		if (ret_val)
2843 			return ret_val;
2844 
2845 		/* Enable jumbo frame workaround in the PHY */
2846 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2847 		data &= ~(0x7F << 5);
2848 		data |= (0x37 << 5);
2849 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2850 		if (ret_val)
2851 			return ret_val;
2852 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2853 		data &= ~(1 << 13);
2854 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2855 		if (ret_val)
2856 			return ret_val;
2857 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2858 		data &= ~(0x3FF << 2);
2859 		data |= (E1000_TX_PTR_GAP << 2);
2860 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2861 		if (ret_val)
2862 			return ret_val;
2863 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2864 		if (ret_val)
2865 			return ret_val;
2866 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2867 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2868 						(1 << 10));
2869 		if (ret_val)
2870 			return ret_val;
2871 	} else {
2872 		/* Write MAC register values back to h/w defaults */
2873 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2874 		mac_reg &= ~(0xF << 14);
2875 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2876 
2877 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2878 		mac_reg &= ~E1000_RCTL_SECRC;
2879 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2880 
2881 		ret_val = e1000_read_kmrn_reg_generic(hw,
2882 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2883 						&data);
2884 		if (ret_val)
2885 			return ret_val;
2886 		ret_val = e1000_write_kmrn_reg_generic(hw,
2887 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2888 						data & ~(1 << 0));
2889 		if (ret_val)
2890 			return ret_val;
2891 		ret_val = e1000_read_kmrn_reg_generic(hw,
2892 						E1000_KMRNCTRLSTA_HD_CTRL,
2893 						&data);
2894 		if (ret_val)
2895 			return ret_val;
2896 		data &= ~(0xF << 8);
2897 		data |= (0xB << 8);
2898 		ret_val = e1000_write_kmrn_reg_generic(hw,
2899 						E1000_KMRNCTRLSTA_HD_CTRL,
2900 						data);
2901 		if (ret_val)
2902 			return ret_val;
2903 
2904 		/* Write PHY register values back to h/w defaults */
2905 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2906 		data &= ~(0x7F << 5);
2907 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2908 		if (ret_val)
2909 			return ret_val;
2910 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2911 		data |= (1 << 13);
2912 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2913 		if (ret_val)
2914 			return ret_val;
2915 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2916 		data &= ~(0x3FF << 2);
2917 		data |= (0x8 << 2);
2918 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2919 		if (ret_val)
2920 			return ret_val;
2921 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2922 		if (ret_val)
2923 			return ret_val;
2924 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2925 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2926 						~(1 << 10));
2927 		if (ret_val)
2928 			return ret_val;
2929 	}
2930 
2931 	/* re-enable Rx path after enabling/disabling workaround */
2932 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2933 				     ~(1 << 14));
2934 }
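
/*
 * Caller sketch (hypothetical): a driver would typically toggle this
 * workaround from its MTU-change path on 82579, along the lines of
 *
 *	ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, jumbo_enabled);
 *
 * where jumbo_enabled is a placeholder for the driver's own jumbo-frame
 * test (frame size above the standard 1518-byte maximum).
 */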
2935 
2936 /**
2937  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds after every PHY reset
2938  *  @hw: pointer to the HW structure
2939  **/
2940 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2941 {
2942 	s32 ret_val = E1000_SUCCESS;
2943 
2944 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2945 
2946 	if (hw->mac.type != e1000_pch2lan)
2947 		return E1000_SUCCESS;
2948 
2949 	/* Set MDIO slow mode before any other MDIO access */
2950 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2951 	if (ret_val)
2952 		return ret_val;
2953 
2954 	ret_val = hw->phy.ops.acquire(hw);
2955 	if (ret_val)
2956 		return ret_val;
2957 	/* Set MSE higher so the link stays up when noise is high */
2958 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2959 	if (ret_val)
2960 		goto release;
2961 	/* Drop the link after the MSE threshold has been reached 5 times */
2962 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2963 release:
2964 	hw->phy.ops.release(hw);
2965 
2966 	return ret_val;
2967 }
2968 
2969 /**
2970  *  e1000_k1_workaround_lv - K1 Si workaround
2971  *  @hw:   pointer to the HW structure
2972  *
2973  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2974  *  Disables K1 for 1000 and 100 speeds.
2975  **/
2976 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2977 {
2978 	s32 ret_val = E1000_SUCCESS;
2979 	u16 status_reg = 0;
2980 
2981 	DEBUGFUNC("e1000_k1_workaround_lv");
2982 
2983 	if (hw->mac.type != e1000_pch2lan)
2984 		return E1000_SUCCESS;
2985 
2986 	/* Set K1 beacon duration based on 10Mbps speed */
2987 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2988 	if (ret_val)
2989 		return ret_val;
2990 
2991 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2992 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2993 		if (status_reg &
2994 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2995 			u16 pm_phy_reg;
2996 
2997 			/* LV 1G/100 packet drop issue workaround */
2998 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2999 						       &pm_phy_reg);
3000 			if (ret_val)
3001 				return ret_val;
3002 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
3003 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
3004 							pm_phy_reg);
3005 			if (ret_val)
3006 				return ret_val;
3007 		} else {
3008 			u32 mac_reg;
3009 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3010 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3011 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3012 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3013 		}
3014 	}
3015 
3016 	return ret_val;
3017 }
3018 
3019 /**
3020  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3021  *  @hw:   pointer to the HW structure
3022  *  @gate: boolean set to TRUE to gate, FALSE to ungate
3023  *
3024  *  Gate/ungate the automatic PHY configuration via hardware; perform
3025  *  the configuration via software instead.
3026  **/
3027 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3028 {
3029 	u32 extcnf_ctrl;
3030 
3031 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3032 
3033 	if (hw->mac.type < e1000_pch2lan)
3034 		return;
3035 
3036 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3037 
3038 	if (gate)
3039 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3040 	else
3041 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3042 
3043 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3044 }
3045 
3046 /**
3047  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3048  *  @hw: pointer to the HW structure
3049  *
3050  *  Check the appropriate indication that the MAC has finished configuring the
3051  *  PHY after a software reset.
3052  **/
3053 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3054 {
3055 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3056 
3057 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
3058 
3059 	/* Wait for basic configuration to complete before proceeding */
3060 	do {
3061 		data = E1000_READ_REG(hw, E1000_STATUS);
3062 		data &= E1000_STATUS_LAN_INIT_DONE;
3063 		usec_delay(100);
3064 	} while ((!data) && --loop);
3065 
3066 	/* If basic configuration is incomplete before the above loop
3067 	 * count reaches 0, loading the configuration from NVM will
3068 	 * leave the PHY in a bad state possibly resulting in no link.
3069 	 */
3070 	if (loop == 0)
3071 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3072 
3073 	/* Clear the Init Done bit for the next init event */
3074 	data = E1000_READ_REG(hw, E1000_STATUS);
3075 	data &= ~E1000_STATUS_LAN_INIT_DONE;
3076 	E1000_WRITE_REG(hw, E1000_STATUS, data);
3077 }
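
/*
 * Timing sketch (illustrative): the loop above polls every 100 usec for
 * at most E1000_ICH8_LAN_INIT_TIMEOUT iterations, so the worst-case wait
 * is roughly
 *
 *	timeout_usec = E1000_ICH8_LAN_INIT_TIMEOUT * 100
 *
 * after which the routine only logs and clears the Init Done bit rather
 * than failing hard.
 */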
3078 
3079 /**
3080  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3081  *  @hw: pointer to the HW structure
3082  **/
3083 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3084 {
3085 	s32 ret_val = E1000_SUCCESS;
3086 	u16 reg;
3087 
3088 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3089 
3090 	if (hw->phy.ops.check_reset_block(hw))
3091 		return E1000_SUCCESS;
3092 
3093 	/* Allow time for h/w to get to quiescent state after reset */
3094 	msec_delay(10);
3095 
3096 	/* Perform any necessary post-reset workarounds */
3097 	switch (hw->mac.type) {
3098 	case e1000_pchlan:
3099 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3100 		if (ret_val)
3101 			return ret_val;
3102 		break;
3103 	case e1000_pch2lan:
3104 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3105 		if (ret_val)
3106 			return ret_val;
3107 		break;
3108 	default:
3109 		break;
3110 	}
3111 
3112 	/* Clear the host wakeup bit after lcd reset */
3113 	if (hw->mac.type >= e1000_pchlan) {
3114 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3115 		reg &= ~BM_WUC_HOST_WU_BIT;
3116 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3117 	}
3118 
3119 	/* Configure the LCD with the extended configuration region in NVM */
3120 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3121 	if (ret_val)
3122 		return ret_val;
3123 
3124 	/* Configure the LCD with the OEM bits in NVM */
3125 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3126 
3127 	if (hw->mac.type == e1000_pch2lan) {
3128 		/* Ungate automatic PHY configuration on non-managed 82579 */
3129 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3130 		    E1000_ICH_FWSM_FW_VALID)) {
3131 			msec_delay(10);
3132 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3133 		}
3134 
3135 		/* Set EEE LPI Update Timer to 200usec */
3136 		ret_val = hw->phy.ops.acquire(hw);
3137 		if (ret_val)
3138 			return ret_val;
3139 		ret_val = e1000_write_emi_reg_locked(hw,
3140 						     I82579_LPI_UPDATE_TIMER,
3141 						     0x1387);
3142 		hw->phy.ops.release(hw);
3143 	}
3144 
3145 	return ret_val;
3146 }
3147 
3148 /**
3149  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3150  *  @hw: pointer to the HW structure
3151  *
3152  *  Resets the PHY
3153  *  This is a function pointer entry point called by drivers
3154  *  or other shared routines.
3155  **/
3156 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3157 {
3158 	s32 ret_val = E1000_SUCCESS;
3159 
3160 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3161 
3162 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3163 	if ((hw->mac.type == e1000_pch2lan) &&
3164 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3165 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3166 
3167 	ret_val = e1000_phy_hw_reset_generic(hw);
3168 	if (ret_val)
3169 		return ret_val;
3170 
3171 	return e1000_post_phy_reset_ich8lan(hw);
3172 }
3173 
3174 /**
3175  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3176  *  @hw: pointer to the HW structure
3177  *  @active: TRUE to enable LPLU, FALSE to disable
3178  *
3179  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3180  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
3181  *  the phy speed. This function will manually set the LPLU bit and restart
3182  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3183  *  since it configures the same bit.
3184  **/
3185 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3186 {
3187 	s32 ret_val;
3188 	u16 oem_reg;
3189 
3190 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3191 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3192 	if (ret_val)
3193 		return ret_val;
3194 
3195 	if (active)
3196 		oem_reg |= HV_OEM_BITS_LPLU;
3197 	else
3198 		oem_reg &= ~HV_OEM_BITS_LPLU;
3199 
3200 	if (!hw->phy.ops.check_reset_block(hw))
3201 		oem_reg |= HV_OEM_BITS_RESTART_AN;
3202 
3203 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3204 }
3205 
3206 /**
3207  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3208  *  @hw: pointer to the HW structure
3209  *  @active: TRUE to enable LPLU, FALSE to disable
3210  *
3211  *  Sets the LPLU D0 state according to the active flag.  When
3212  *  activating LPLU this function also disables smart speed
3213  *  and vice versa.  LPLU will not be activated unless the
3214  *  device autonegotiation advertisement meets standards of
3215  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3216  *  This is a function pointer entry point only called by
3217  *  PHY setup routines.
3218  **/
3219 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3220 {
3221 	struct e1000_phy_info *phy = &hw->phy;
3222 	u32 phy_ctrl;
3223 	s32 ret_val = E1000_SUCCESS;
3224 	u16 data;
3225 
3226 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3227 
3228 	if (phy->type == e1000_phy_ife)
3229 		return E1000_SUCCESS;
3230 
3231 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3232 
3233 	if (active) {
3234 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3235 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3236 
3237 		if (phy->type != e1000_phy_igp_3)
3238 			return E1000_SUCCESS;
3239 
3240 		/* Call gig speed drop workaround on LPLU before accessing
3241 		 * any PHY registers
3242 		 */
3243 		if (hw->mac.type == e1000_ich8lan)
3244 			e1000_gig_downshift_workaround_ich8lan(hw);
3245 
3246 		/* When LPLU is enabled, we should disable SmartSpeed */
3247 		ret_val = phy->ops.read_reg(hw,
3248 					    IGP01E1000_PHY_PORT_CONFIG,
3249 					    &data);
3250 		if (ret_val)
3251 			return ret_val;
3252 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3253 		ret_val = phy->ops.write_reg(hw,
3254 					     IGP01E1000_PHY_PORT_CONFIG,
3255 					     data);
3256 		if (ret_val)
3257 			return ret_val;
3258 	} else {
3259 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3260 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3261 
3262 		if (phy->type != e1000_phy_igp_3)
3263 			return E1000_SUCCESS;
3264 
3265 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3266 		 * during Dx states where the power conservation is most
3267 		 * important.  During driver activity we should enable
3268 		 * SmartSpeed, so performance is maintained.
3269 		 */
3270 		if (phy->smart_speed == e1000_smart_speed_on) {
3271 			ret_val = phy->ops.read_reg(hw,
3272 						    IGP01E1000_PHY_PORT_CONFIG,
3273 						    &data);
3274 			if (ret_val)
3275 				return ret_val;
3276 
3277 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3278 			ret_val = phy->ops.write_reg(hw,
3279 						     IGP01E1000_PHY_PORT_CONFIG,
3280 						     data);
3281 			if (ret_val)
3282 				return ret_val;
3283 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3284 			ret_val = phy->ops.read_reg(hw,
3285 						    IGP01E1000_PHY_PORT_CONFIG,
3286 						    &data);
3287 			if (ret_val)
3288 				return ret_val;
3289 
3290 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3291 			ret_val = phy->ops.write_reg(hw,
3292 						     IGP01E1000_PHY_PORT_CONFIG,
3293 						     data);
3294 			if (ret_val)
3295 				return ret_val;
3296 		}
3297 	}
3298 
3299 	return E1000_SUCCESS;
3300 }
3301 
3302 /**
3303  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3304  *  @hw: pointer to the HW structure
3305  *  @active: TRUE to enable LPLU, FALSE to disable
3306  *
3307  *  Sets the LPLU D3 state according to the active flag.  When
3308  *  activating LPLU this function also disables smart speed
3309  *  and vice versa.  LPLU will not be activated unless the
3310  *  device autonegotiation advertisement meets standards of
3311  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3312  *  This is a function pointer entry point only called by
3313  *  PHY setup routines.
3314  **/
3315 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3316 {
3317 	struct e1000_phy_info *phy = &hw->phy;
3318 	u32 phy_ctrl;
3319 	s32 ret_val = E1000_SUCCESS;
3320 	u16 data;
3321 
3322 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3323 
3324 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3325 
3326 	if (!active) {
3327 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3328 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3329 
3330 		if (phy->type != e1000_phy_igp_3)
3331 			return E1000_SUCCESS;
3332 
3333 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3334 		 * during Dx states where the power conservation is most
3335 		 * important.  During driver activity we should enable
3336 		 * SmartSpeed, so performance is maintained.
3337 		 */
3338 		if (phy->smart_speed == e1000_smart_speed_on) {
3339 			ret_val = phy->ops.read_reg(hw,
3340 						    IGP01E1000_PHY_PORT_CONFIG,
3341 						    &data);
3342 			if (ret_val)
3343 				return ret_val;
3344 
3345 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3346 			ret_val = phy->ops.write_reg(hw,
3347 						     IGP01E1000_PHY_PORT_CONFIG,
3348 						     data);
3349 			if (ret_val)
3350 				return ret_val;
3351 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3352 			ret_val = phy->ops.read_reg(hw,
3353 						    IGP01E1000_PHY_PORT_CONFIG,
3354 						    &data);
3355 			if (ret_val)
3356 				return ret_val;
3357 
3358 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3359 			ret_val = phy->ops.write_reg(hw,
3360 						     IGP01E1000_PHY_PORT_CONFIG,
3361 						     data);
3362 			if (ret_val)
3363 				return ret_val;
3364 		}
3365 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3366 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3367 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3368 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3369 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3370 
3371 		if (phy->type != e1000_phy_igp_3)
3372 			return E1000_SUCCESS;
3373 
3374 		/* Call gig speed drop workaround on LPLU before accessing
3375 		 * any PHY registers
3376 		 */
3377 		if (hw->mac.type == e1000_ich8lan)
3378 			e1000_gig_downshift_workaround_ich8lan(hw);
3379 
3380 		/* When LPLU is enabled, we should disable SmartSpeed */
3381 		ret_val = phy->ops.read_reg(hw,
3382 					    IGP01E1000_PHY_PORT_CONFIG,
3383 					    &data);
3384 		if (ret_val)
3385 			return ret_val;
3386 
3387 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3388 		ret_val = phy->ops.write_reg(hw,
3389 					     IGP01E1000_PHY_PORT_CONFIG,
3390 					     data);
3391 	}
3392 
3393 	return ret_val;
3394 }
3395 
3396 /**
3397  *  e1000_valid_nvm_bank_detect_ich8lan - detect the valid NVM bank (0 or 1)
3398  *  @hw: pointer to the HW structure
3399  *  @bank:  pointer to the variable that returns the active bank
3400  *
3401  *  Reads signature byte from the NVM using the flash access registers.
3402  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3403  **/
3404 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3405 {
3406 	u32 eecd;
3407 	struct e1000_nvm_info *nvm = &hw->nvm;
3408 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3409 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3410 	u32 nvm_dword = 0;
3411 	u8 sig_byte = 0;
3412 	s32 ret_val;
3413 
3414 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3415 
3416 	switch (hw->mac.type) {
3417 	case e1000_pch_spt:
3418 	case e1000_pch_cnp:
3419 	case e1000_pch_tgp:
3420 		bank1_offset = nvm->flash_bank_size;
3421 		act_offset = E1000_ICH_NVM_SIG_WORD;
3422 
3423 		/* set bank to 0 in case flash read fails */
3424 		*bank = 0;
3425 
3426 		/* Check bank 0 */
3427 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3428 							 &nvm_dword);
3429 		if (ret_val)
3430 			return ret_val;
3431 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3432 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3433 		    E1000_ICH_NVM_SIG_VALUE) {
3434 			*bank = 0;
3435 			return E1000_SUCCESS;
3436 		}
3437 
3438 		/* Check bank 1 */
3439 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3440 							 bank1_offset,
3441 							 &nvm_dword);
3442 		if (ret_val)
3443 			return ret_val;
3444 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3445 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3446 		    E1000_ICH_NVM_SIG_VALUE) {
3447 			*bank = 1;
3448 			return E1000_SUCCESS;
3449 		}
3450 
3451 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3452 		return -E1000_ERR_NVM;
3453 	case e1000_ich8lan:
3454 	case e1000_ich9lan:
3455 		eecd = E1000_READ_REG(hw, E1000_EECD);
3456 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3457 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3458 			if (eecd & E1000_EECD_SEC1VAL)
3459 				*bank = 1;
3460 			else
3461 				*bank = 0;
3462 
3463 			return E1000_SUCCESS;
3464 		}
3465 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3466 		/* fall-thru */
3467 	default:
3468 		/* set bank to 0 in case flash read fails */
3469 		*bank = 0;
3470 
3471 		/* Check bank 0 */
3472 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3473 							&sig_byte);
3474 		if (ret_val)
3475 			return ret_val;
3476 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3477 		    E1000_ICH_NVM_SIG_VALUE) {
3478 			*bank = 0;
3479 			return E1000_SUCCESS;
3480 		}
3481 
3482 		/* Check bank 1 */
3483 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3484 							bank1_offset,
3485 							&sig_byte);
3486 		if (ret_val)
3487 			return ret_val;
3488 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3489 		    E1000_ICH_NVM_SIG_VALUE) {
3490 			*bank = 1;
3491 			return E1000_SUCCESS;
3492 		}
3493 
3494 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3495 		return -E1000_ERR_NVM;
3496 	}
3497 }
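
/*
 * Illustrative sketch (not compiled): the signature test used above,
 * factored out.  This assumes the usual header values
 * E1000_ICH_NVM_VALID_SIG_MASK (0xC0) and E1000_ICH_NVM_SIG_VALUE (0x80),
 * i.e. bits 15:14 of NVM word 0x13 must read 10b for a bank to be valid.
 */
#if 0
static bool ich8_nvm_sig_valid(u8 sig_byte)
{
	return ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
	    E1000_ICH_NVM_SIG_VALUE);
}
#endif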
3498 
3499 /**
3500  *  e1000_read_nvm_spt - NVM access for SPT
3501  *  @hw: pointer to the HW structure
3502  *  @offset: The offset (in words) of the word(s) to read.
3503  *  @words: Size of data to read in words.
3504  *  @data: pointer to the word(s) to read at offset.
3505  *
3506  *  Reads one or more words from the NVM.
3507  **/
3508 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3509 			      u16 *data)
3510 {
3511 	struct e1000_nvm_info *nvm = &hw->nvm;
3512 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3513 	u32 act_offset;
3514 	s32 ret_val = E1000_SUCCESS;
3515 	u32 bank = 0;
3516 	u32 dword = 0;
3517 	u16 offset_to_read;
3518 	u16 i;
3519 
3520 	DEBUGFUNC("e1000_read_nvm_spt");
3521 
3522 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3523 	    (words == 0)) {
3524 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3525 		ret_val = -E1000_ERR_NVM;
3526 		goto out;
3527 	}
3528 
3529 	nvm->ops.acquire(hw);
3530 
3531 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3532 	if (ret_val != E1000_SUCCESS) {
3533 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3534 		bank = 0;
3535 	}
3536 
3537 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3538 	act_offset += offset;
3539 
3540 	ret_val = E1000_SUCCESS;
3541 
3542 	for (i = 0; i < words; i += 2) {
3543 		if (words - i == 1) {
3544 			if (dev_spec->shadow_ram[offset+i].modified) {
3545 				data[i] = dev_spec->shadow_ram[offset+i].value;
3546 			} else {
3547 				offset_to_read = act_offset + i -
3548 						 ((act_offset + i) % 2);
3549 				ret_val =
3550 				   e1000_read_flash_dword_ich8lan(hw,
3551 								 offset_to_read,
3552 								 &dword);
3553 				if (ret_val)
3554 					break;
3555 				if ((act_offset + i) % 2 == 0)
3556 					data[i] = (u16)(dword & 0xFFFF);
3557 				else
3558 					data[i] = (u16)((dword >> 16) & 0xFFFF);
3559 			}
3560 		} else {
3561 			offset_to_read = act_offset + i;
3562 			if (!(dev_spec->shadow_ram[offset+i].modified) ||
3563 			    !(dev_spec->shadow_ram[offset+i+1].modified)) {
3564 				ret_val =
3565 				   e1000_read_flash_dword_ich8lan(hw,
3566 								 offset_to_read,
3567 								 &dword);
3568 				if (ret_val)
3569 					break;
3570 			}
3571 			if (dev_spec->shadow_ram[offset+i].modified)
3572 				data[i] = dev_spec->shadow_ram[offset+i].value;
3573 			else
3574 				data[i] = (u16) (dword & 0xFFFF);
3575 			if (dev_spec->shadow_ram[offset+i+1].modified)
3576 				data[i+1] =
3577 				   dev_spec->shadow_ram[offset+i+1].value;
3578 			else
3579 				data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3580 		}
3581 	}
3582 
3583 	nvm->ops.release(hw);
3584 
3585 out:
3586 	if (ret_val)
3587 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3588 
3589 	return ret_val;
3590 }
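
/*
 * Illustrative sketch (not compiled): how the loop above maps a word
 * offset onto 32-bit flash reads.  The dword containing word 'w' starts
 * at the even word offset (w & ~1); an even word occupies the low 16
 * bits of that dword and an odd word the high 16 bits.
 */
#if 0
static u16 word_from_dword(u32 dword, u32 word_offset)
{
	if ((word_offset % 2) == 0)
		return ((u16)(dword & 0xFFFF));		/* even: low half */
	return ((u16)((dword >> 16) & 0xFFFF));		/* odd: high half */
}
#endif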
3591 
3592 /**
3593  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3594  *  @hw: pointer to the HW structure
3595  *  @offset: The offset (in words) of the word(s) to read.
3596  *  @words: Size of data to read in words
3597  *  @data: Pointer to the word(s) to read at offset.
3598  *
3599  *  Reads one or more words from the NVM using the flash access registers.
3600  **/
3601 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3602 				  u16 *data)
3603 {
3604 	struct e1000_nvm_info *nvm = &hw->nvm;
3605 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3606 	u32 act_offset;
3607 	s32 ret_val = E1000_SUCCESS;
3608 	u32 bank = 0;
3609 	u16 i, word;
3610 
3611 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3612 
3613 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3614 	    (words == 0)) {
3615 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3616 		ret_val = -E1000_ERR_NVM;
3617 		goto out;
3618 	}
3619 
3620 	nvm->ops.acquire(hw);
3621 
3622 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3623 	if (ret_val != E1000_SUCCESS) {
3624 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3625 		bank = 0;
3626 	}
3627 
3628 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3629 	act_offset += offset;
3630 
3631 	ret_val = E1000_SUCCESS;
3632 	for (i = 0; i < words; i++) {
3633 		if (dev_spec->shadow_ram[offset+i].modified) {
3634 			data[i] = dev_spec->shadow_ram[offset+i].value;
3635 		} else {
3636 			ret_val = e1000_read_flash_word_ich8lan(hw,
3637 								act_offset + i,
3638 								&word);
3639 			if (ret_val)
3640 				break;
3641 			data[i] = word;
3642 		}
3643 	}
3644 
3645 	nvm->ops.release(hw);
3646 
3647 out:
3648 	if (ret_val)
3649 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3650 
3651 	return ret_val;
3652 }
3653 
3654 /**
3655  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3656  *  @hw: pointer to the HW structure
3657  *
3658  *  This function does initial flash setup so that a new read/write/erase cycle
3659  *  can be started.
3660  **/
3661 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3662 {
3663 	union ich8_hws_flash_status hsfsts;
3664 	s32 ret_val = -E1000_ERR_NVM;
3665 
3666 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3667 
3668 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3669 
3670 	/* Check if the flash descriptor is valid */
3671 	if (!hsfsts.hsf_status.fldesvalid) {
3672 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3673 		return -E1000_ERR_NVM;
3674 	}
3675 
3676 	/* Clear FCERR and DAEL in hw status by writing 1 */
3677 	hsfsts.hsf_status.flcerr = 1;
3678 	hsfsts.hsf_status.dael = 1;
3679 	if (hw->mac.type >= e1000_pch_spt)
3680 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3681 				      hsfsts.regval & 0xFFFF);
3682 	else
3683 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3684 
3685 	/* Either the hardware provides a SPI cycle-in-progress
3686 	 * bit that we can check before starting a new cycle, or
3687 	 * the FDONE bit must come out of hardware reset as 1,
3688 	 * in which case it serves as the indication of whether
3689 	 * a cycle is in progress or has completed.
3690 	 */
3692 
3693 	if (!hsfsts.hsf_status.flcinprog) {
3694 		/* There is no cycle running at present,
3695 		 * so we can start a cycle.
3696 		 * Begin by setting Flash Cycle Done.
3697 		 */
3698 		hsfsts.hsf_status.flcdone = 1;
3699 		if (hw->mac.type >= e1000_pch_spt)
3700 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3701 					      hsfsts.regval & 0xFFFF);
3702 		else
3703 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3704 						hsfsts.regval);
3705 		ret_val = E1000_SUCCESS;
3706 	} else {
3707 		s32 i;
3708 
3709 		/* Otherwise poll for sometime so the current
3710 		/* Otherwise poll for some time so the current
3711 		 */
3712 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3713 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3714 							      ICH_FLASH_HSFSTS);
3715 			if (!hsfsts.hsf_status.flcinprog) {
3716 				ret_val = E1000_SUCCESS;
3717 				break;
3718 			}
3719 			usec_delay(1);
3720 		}
3721 		if (ret_val == E1000_SUCCESS) {
3722 			/* The previous cycle completed before the timeout
3723 			 * expired; now set the Flash Cycle Done.
3724 			 */
3725 			hsfsts.hsf_status.flcdone = 1;
3726 			if (hw->mac.type >= e1000_pch_spt)
3727 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3728 						      hsfsts.regval & 0xFFFF);
3729 			else
3730 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3731 							hsfsts.regval);
3732 		} else {
3733 			DEBUGOUT("Flash controller busy, cannot get access\n");
3734 		}
3735 	}
3736 
3737 	return ret_val;
3738 }
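
/*
 * Illustrative sketch (not compiled): FCERR and DAEL in HSFSTS are
 * write-1-to-clear bits, so the read-modify-write above clears any
 * latched error state; writing 1 does not set the bits.  The helper
 * name is hypothetical.
 */
#if 0
static void ich8_clear_flash_errors(struct e1000_hw *hw)
{
	union ich8_hws_flash_status sts;

	sts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
	sts.hsf_status.flcerr = 1;	/* 1 = "clear this bit" */
	sts.hsf_status.dael = 1;
	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, sts.regval);
}
#endif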
3739 
3740 /**
3741  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3742  *  @hw: pointer to the HW structure
3743  *  @timeout: maximum time to wait for completion
3744  *
3745  *  This function starts a flash cycle and waits for its completion.
3746  **/
3747 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3748 {
3749 	union ich8_hws_flash_ctrl hsflctl;
3750 	union ich8_hws_flash_status hsfsts;
3751 	u32 i = 0;
3752 
3753 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3754 
3755 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3756 	if (hw->mac.type >= e1000_pch_spt)
3757 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3758 	else
3759 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3760 	hsflctl.hsf_ctrl.flcgo = 1;
3761 
3762 	if (hw->mac.type >= e1000_pch_spt)
3763 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3764 				      hsflctl.regval << 16);
3765 	else
3766 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3767 
3768 	/* wait till FDONE bit is set to 1 */
3769 	do {
3770 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3771 		if (hsfsts.hsf_status.flcdone)
3772 			break;
3773 		usec_delay(1);
3774 	} while (i++ < timeout);
3775 
3776 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3777 		return E1000_SUCCESS;
3778 
3779 	return -E1000_ERR_NVM;
3780 }
3781 
3782 /**
3783  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3784  *  @hw: pointer to the HW structure
3785  *  @offset: offset to data location
3786  *  @data: pointer to the location for storing the data
3787  *
3788  *  Reads the flash dword at offset into data.  Offset is converted
3789  *  to bytes before read.
3790  **/
3791 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3792 					  u32 *data)
3793 {
3794 	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3795 
3796 	if (!data)
3797 		return -E1000_ERR_NVM;
3798 
3799 	/* Must convert word offset into bytes. */
3800 	offset <<= 1;
3801 
3802 	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3803 }
3804 
3805 /**
3806  *  e1000_read_flash_word_ich8lan - Read word from flash
3807  *  @hw: pointer to the HW structure
3808  *  @offset: offset to data location
3809  *  @data: pointer to the location for storing the data
3810  *
3811  *  Reads the flash word at offset into data.  Offset is converted
3812  *  to bytes before read.
3813  **/
3814 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3815 					 u16 *data)
3816 {
3817 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3818 
3819 	if (!data)
3820 		return -E1000_ERR_NVM;
3821 
3822 	/* Must convert offset into bytes. */
3823 	offset <<= 1;
3824 
3825 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3826 }
3827 
3828 /**
3829  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3830  *  @hw: pointer to the HW structure
3831  *  @offset: The offset of the byte to read.
3832  *  @data: Pointer to a byte to store the value read.
3833  *
3834  *  Reads a single byte from the NVM using the flash access registers.
3835  **/
3836 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3837 					 u8 *data)
3838 {
3839 	s32 ret_val;
3840 	u16 word = 0;
3841 
3842 	/* In SPT, only 32-bit flash access is supported,
3843 	 * so this function must not be called there.
3844 	 */
3845 	if (hw->mac.type >= e1000_pch_spt)
3846 		return -E1000_ERR_NVM;
3847 	else
3848 		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3849 
3850 	if (ret_val)
3851 		return ret_val;
3852 
3853 	*data = (u8)word;
3854 
3855 	return E1000_SUCCESS;
3856 }
3857 
3858 /**
3859  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3860  *  @hw: pointer to the HW structure
3861  *  @offset: The offset (in bytes) of the byte or word to read.
3862  *  @size: Size of data to read, 1=byte 2=word
3863  *  @data: Pointer to the word to store the value read.
3864  *
3865  *  Reads a byte or word from the NVM using the flash access registers.
3866  **/
3867 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3868 					 u8 size, u16 *data)
3869 {
3870 	union ich8_hws_flash_status hsfsts;
3871 	union ich8_hws_flash_ctrl hsflctl;
3872 	u32 flash_linear_addr;
3873 	u32 flash_data = 0;
3874 	s32 ret_val = -E1000_ERR_NVM;
3875 	u8 count = 0;
3876 
3877 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3878 
3879 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3880 		return -E1000_ERR_NVM;
3881 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3882 			     hw->nvm.flash_base_addr);
3883 
3884 	do {
3885 		usec_delay(1);
3886 		/* Steps */
3887 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3888 		if (ret_val != E1000_SUCCESS)
3889 			break;
3890 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3891 
3892 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3893 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3894 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3895 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3896 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3897 
3898 		ret_val = e1000_flash_cycle_ich8lan(hw,
3899 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3900 
3901 		/* If FCERR is set, clear it and retry the whole
3902 		 * sequence a few more times; otherwise read the
3903 		 * result out of the Flash Data0 register, least
3904 		 * significant byte first.
3905 		 */
3906 		if (ret_val == E1000_SUCCESS) {
3907 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3908 			if (size == 1)
3909 				*data = (u8)(flash_data & 0x000000FF);
3910 			else if (size == 2)
3911 				*data = (u16)(flash_data & 0x0000FFFF);
3912 			break;
3913 		} else {
3914 			/* If we've gotten here, then things are probably
3915 			 * completely hosed, but if the error condition is
3916 			 * detected, it won't hurt to give it another try...
3917 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3918 			 */
3919 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3920 							      ICH_FLASH_HSFSTS);
3921 			if (hsfsts.hsf_status.flcerr) {
3922 				/* Repeat for some time before giving up. */
3923 				continue;
3924 			} else if (!hsfsts.hsf_status.flcdone) {
3925 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3926 				break;
3927 			}
3928 		}
3929 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3930 
3931 	return ret_val;
3932 }
3933 
3934 /**
3935  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3936  *  @hw: pointer to the HW structure
3937  *  @offset: The offset (in bytes) of the dword to read.
3938  *  @data: Pointer to the dword to store the value read.
3939  *
3940  *  Reads a dword from the NVM using the flash access registers.
3941  **/
3942 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3943 					   u32 *data)
3944 {
3945 	union ich8_hws_flash_status hsfsts;
3946 	union ich8_hws_flash_ctrl hsflctl;
3947 	u32 flash_linear_addr;
3948 	s32 ret_val = -E1000_ERR_NVM;
3949 	u8 count = 0;
3950 
3951 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3952 
3953 	if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
3954 		return -E1000_ERR_NVM;
3955 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3956 			     hw->nvm.flash_base_addr);
3957 
3958 	do {
3959 		usec_delay(1);
3960 		/* Steps */
3961 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3962 		if (ret_val != E1000_SUCCESS)
3963 			break;
3964 		/* In SPT, this register is in LAN memory space, not flash.
3965 		 * Therefore, only 32-bit access is supported.
3966 		 */
3967 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3968 
3969 		/* A dbcount of 3 selects a 4-byte (dword) transfer. */
3970 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3971 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3972 		/* In SPT, this register is in LAN memory space, not flash.
3973 		 * Therefore, only 32-bit access is supported.
3974 		 */
3975 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3976 				      (u32)hsflctl.regval << 16);
3977 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3978 
3979 		ret_val = e1000_flash_cycle_ich8lan(hw,
3980 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3981 
3982 		/* If FCERR is set, clear it and retry the whole
3983 		 * sequence a few more times; otherwise read the
3984 		 * result out of the Flash Data0 register, least
3985 		 * significant byte first.
3986 		 */
3987 		if (ret_val == E1000_SUCCESS) {
3988 			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3989 			break;
3990 		} else {
3991 			/* If we've gotten here, then things are probably
3992 			 * completely hosed, but if the error condition is
3993 			 * detected, it won't hurt to give it another try...
3994 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3995 			 */
3996 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3997 							      ICH_FLASH_HSFSTS);
3998 			if (hsfsts.hsf_status.flcerr) {
3999 				/* Repeat for some time before giving up. */
4000 				continue;
4001 			} else if (!hsfsts.hsf_status.flcdone) {
4002 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4003 				break;
4004 			}
4005 		}
4006 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4007 
4008 	return ret_val;
4009 }
4010 
4011 /**
4012  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
4013  *  @hw: pointer to the HW structure
4014  *  @offset: The offset (in words) of the word(s) to write.
4015  *  @words: Size of data to write in words
4016  *  @data: Pointer to the word(s) to write at offset.
4017  *
4018  *  Stages the word(s) in the shadow RAM; the NVM update op commits them.
4019  **/
4020 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4021 				   u16 *data)
4022 {
4023 	struct e1000_nvm_info *nvm = &hw->nvm;
4024 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4025 	u16 i;
4026 
4027 	DEBUGFUNC("e1000_write_nvm_ich8lan");
4028 
4029 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4030 	    (words == 0)) {
4031 		DEBUGOUT("nvm parameter(s) out of bounds\n");
4032 		return -E1000_ERR_NVM;
4033 	}
4034 
4035 	nvm->ops.acquire(hw);
4036 
4037 	for (i = 0; i < words; i++) {
4038 		dev_spec->shadow_ram[offset+i].modified = TRUE;
4039 		dev_spec->shadow_ram[offset+i].value = data[i];
4040 	}
4041 
4042 	nvm->ops.release(hw);
4043 
4044 	return E1000_SUCCESS;
4045 }
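
/*
 * Illustrative usage sketch (not compiled; the word offset 0x10 and the
 * data values are hypothetical): the write op above only stages words in
 * the shadow RAM.  Nothing reaches the flash until the update (checksum
 * commit) op runs.
 */
#if 0
static s32 nvm_staged_write_example(struct e1000_hw *hw)
{
	u16 buf[2] = { 0x1234, 0x5678 };	/* hypothetical data */
	s32 err;

	err = hw->nvm.ops.write(hw, 0x10, 2, buf);	/* staged only */
	if (!err)
		err = hw->nvm.ops.update(hw);	/* erase, copy, commit */
	return err;
}
#endif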
4046 
4047 /**
4048  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4049  *  @hw: pointer to the HW structure
4050  *
4051  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4052  *  which writes the checksum to the shadow ram.  The changes in the shadow
4053  *  ram are then committed to the EEPROM by processing each bank at a time
4054  *  checking for the modified bit and writing only the pending changes.
4055  *  After a successful commit, the shadow ram is cleared and is ready for
4056  *  future writes.
4057  **/
4058 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4059 {
4060 	struct e1000_nvm_info *nvm = &hw->nvm;
4061 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4062 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4063 	s32 ret_val;
4064 	u32 dword = 0;
4065 
4066 	DEBUGFUNC("e1000_update_nvm_checksum_spt");
4067 
4068 	ret_val = e1000_update_nvm_checksum_generic(hw);
4069 	if (ret_val)
4070 		goto out;
4071 
4072 	if (nvm->type != e1000_nvm_flash_sw)
4073 		goto out;
4074 
4075 	nvm->ops.acquire(hw);
4076 
4077 	/* We're writing to the opposite bank so if we're on bank 1,
4078 	 * write to bank 0 etc.  We also need to erase the segment that
4079 	 * is going to be written
4080 	 */
4081 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4082 	if (ret_val != E1000_SUCCESS) {
4083 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4084 		bank = 0;
4085 	}
4086 
4087 	if (bank == 0) {
4088 		new_bank_offset = nvm->flash_bank_size;
4089 		old_bank_offset = 0;
4090 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4091 		if (ret_val)
4092 			goto release;
4093 	} else {
4094 		old_bank_offset = nvm->flash_bank_size;
4095 		new_bank_offset = 0;
4096 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4097 		if (ret_val)
4098 			goto release;
4099 	}
4100 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4101 		/* Determine whether to write the value stored
4102 		 * in the other NVM bank or a modified value stored
4103 		 * in the shadow RAM
4104 		 */
4105 		ret_val = e1000_read_flash_dword_ich8lan(hw,
4106 							 i + old_bank_offset,
4107 							 &dword);
4108 
4109 		if (dev_spec->shadow_ram[i].modified) {
4110 			dword &= 0xffff0000;
4111 			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4112 		}
4113 		if (dev_spec->shadow_ram[i + 1].modified) {
4114 			dword &= 0x0000ffff;
4115 			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4116 				  << 16);
4117 		}
4118 		if (ret_val)
4119 			break;
4120 
4121 		/* Word 0x13 sits in the high half of the dword read
4122 		 * at word offset 0x12, so keep its signature bits
4123 		 * (15:14) at 11b until the commit has completed.
4124 		 * That lets us write 10b (signature valid) only
4125 		 * after the write has finished, so the segment is
4126 		 * never marked valid while a write is in progress.
4127 		 */
4128 		if (i == E1000_ICH_NVM_SIG_WORD - 1)
4129 			dword |= E1000_ICH_NVM_SIG_MASK << 16;
4130 
4131 		usec_delay(100);
4132 
4133 		/* Write the data to the new bank.  The offset is in
4134 		 * words; e1000_retry_write_flash_dword_ich8lan
4135 		 * converts it to bytes itself.
4136 		 */
4137 		act_offset = i + new_bank_offset;
4138 		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4139 								dword);
4140 		if (ret_val)
4141 			break;
4142 	}
4143 
4144 	/* Don't bother writing the segment valid bits if sector
4145 	 * programming failed.
4146 	 */
4147 	if (ret_val) {
4148 		DEBUGOUT("Flash commit failed.\n");
4149 		goto release;
4150 	}
4151 
4152 	/* Finally validate the new segment by setting bits 15:14
4153 	 * of word 0x13 to 10b.  This needs no erase, because the
4154 	 * bits start out as 11b and we only have to clear bit 14
4155 	 * to 0.
4156 	 */
4157 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4158 
4159 	/* Offset is in words, but we read a whole dword. */
4160 	--act_offset;
4161 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4162 
4163 	if (ret_val)
4164 		goto release;
4165 
4166 	dword &= 0xBFFFFFFF;
4167 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4168 
4169 	if (ret_val)
4170 		goto release;
4171 
4172 	/* And invalidate the previously valid segment by clearing
4173 	 * the high byte of its signature word (0x13).  This needs
4174 	 * no erase: flash erase sets all bits to 1, and bits can
4175 	 * be written from 1 to 0 at any time.
4176 	 */
4177 
4178 	/* Offset is in words, but we read a whole dword, so start
4179 	 * one word earlier. */
4180 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4181 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4182 
4183 	if (ret_val)
4184 		goto release;
4185 
4186 	dword &= 0x00FFFFFF;
4187 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4188 
4189 	if (ret_val)
4190 		goto release;
4191 
4192 	/* Great!  Everything worked, we can now clear the cached entries. */
4193 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4194 		dev_spec->shadow_ram[i].modified = FALSE;
4195 		dev_spec->shadow_ram[i].value = 0xFFFF;
4196 	}
4197 
4198 release:
4199 	nvm->ops.release(hw);
4200 
4201 	/* Reload the EEPROM, or else modifications will not appear
4202 	 * until after the next adapter reset.
4203 	 */
4204 	if (!ret_val) {
4205 		nvm->ops.reload(hw);
4206 		msec_delay(10);
4207 	}
4208 
4209 out:
4210 	if (ret_val)
4211 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4212 
4213 	return ret_val;
4214 }
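
/*
 * Illustrative sketch (not compiled): why "dword &= 0xBFFFFFFF" above
 * validates the new bank.  Word 0x13 sits in the high 16 bits of the
 * dword read at word offset 0x12, so clearing bit 30 of the dword clears
 * bit 14 of the signature word, turning the all-ones erase pattern (11b)
 * into the valid signature (10b).
 */
#if 0
static void sig_bit_math_example(void)
{
	u32 dw = 0xFFFFFFFFu;	/* freshly erased flash reads all ones */

	dw &= 0xBFFFFFFF;
	/* Now ((dw >> 16) & 0xC000) == 0x8000, i.e. bits 15:14 == 10b. */
}
#endif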
4215 
4216 /**
4217  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4218  *  @hw: pointer to the HW structure
4219  *
4220  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4221  *  which writes the checksum to the shadow ram.  The changes in the shadow
4222  *  ram are then committed to the EEPROM by processing each bank at a time
4223  *  checking for the modified bit and writing only the pending changes.
4224  *  After a successful commit, the shadow ram is cleared and is ready for
4225  *  future writes.
4226  **/
4227 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4228 {
4229 	struct e1000_nvm_info *nvm = &hw->nvm;
4230 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4231 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4232 	s32 ret_val;
4233 	u16 data = 0;
4234 
4235 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4236 
4237 	ret_val = e1000_update_nvm_checksum_generic(hw);
4238 	if (ret_val)
4239 		goto out;
4240 
4241 	if (nvm->type != e1000_nvm_flash_sw)
4242 		goto out;
4243 
4244 	nvm->ops.acquire(hw);
4245 
4246 	/* We're writing to the opposite bank so if we're on bank 1,
4247 	 * write to bank 0 etc.  We also need to erase the segment that
4248 	 * is going to be written
4249 	 */
4250 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4251 	if (ret_val != E1000_SUCCESS) {
4252 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4253 		bank = 0;
4254 	}
4255 
4256 	if (bank == 0) {
4257 		new_bank_offset = nvm->flash_bank_size;
4258 		old_bank_offset = 0;
4259 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4260 		if (ret_val)
4261 			goto release;
4262 	} else {
4263 		old_bank_offset = nvm->flash_bank_size;
4264 		new_bank_offset = 0;
4265 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4266 		if (ret_val)
4267 			goto release;
4268 	}
4269 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4270 		if (dev_spec->shadow_ram[i].modified) {
4271 			data = dev_spec->shadow_ram[i].value;
4272 		} else {
4273 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4274 								old_bank_offset,
4275 								&data);
4276 			if (ret_val)
4277 				break;
4278 		}
4279 		/* If the word is 0x13, then make sure the signature bits
4280 		 * (15:14) are 11b until the commit has completed.
4281 		 * This will allow us to write 10b which indicates the
4282 		 * signature is valid.  We want to do this after the write
4283 		 * has completed so that we don't mark the segment valid
4284 		 * while the write is still in progress
4285 		 */
4286 		if (i == E1000_ICH_NVM_SIG_WORD)
4287 			data |= E1000_ICH_NVM_SIG_MASK;
4288 
4289 		/* Convert offset to bytes. */
4290 		act_offset = (i + new_bank_offset) << 1;
4291 
4292 		usec_delay(100);
4293 
4294 		/* Write the bytes to the new bank. */
4295 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4296 							       act_offset,
4297 							       (u8)data);
4298 		if (ret_val)
4299 			break;
4300 
4301 		usec_delay(100);
4302 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4303 							  act_offset + 1,
4304 							  (u8)(data >> 8));
4305 		if (ret_val)
4306 			break;
4307 	}
4308 
4309 	/* Don't bother writing the segment valid bits if sector
4310 	 * programming failed.
4311 	 */
4312 	if (ret_val) {
4313 		DEBUGOUT("Flash commit failed.\n");
4314 		goto release;
4315 	}
4316 
4317 	/* Finally validate the new segment by setting bits 15:14
4318 	 * of word 0x13 to 10b.  This needs no erase, because the
4319 	 * bits start out as 11b and we only have to clear bit 14
4320 	 * to 0.
4321 	 */
4322 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4323 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4324 	if (ret_val)
4325 		goto release;
4326 
4327 	data &= 0xBFFF;
4328 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4329 						       (u8)(data >> 8));
4330 	if (ret_val)
4331 		goto release;
4332 
4333 	/* And invalidate the previously valid segment by clearing
4334 	 * the high byte of its signature word (0x13).  This needs
4335 	 * no erase: flash erase sets all bits to 1, and bits can
4336 	 * be written from 1 to 0 at any time.
4337 	 */
4338 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4339 
4340 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4341 
4342 	if (ret_val)
4343 		goto release;
4344 
4345 	/* Great!  Everything worked, we can now clear the cached entries. */
4346 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4347 		dev_spec->shadow_ram[i].modified = FALSE;
4348 		dev_spec->shadow_ram[i].value = 0xFFFF;
4349 	}
4350 
4351 release:
4352 	nvm->ops.release(hw);
4353 
4354 	/* Reload the EEPROM, or else modifications will not appear
4355 	 * until after the next adapter reset.
4356 	 */
4357 	if (!ret_val) {
4358 		nvm->ops.reload(hw);
4359 		msec_delay(10);
4360 	}
4361 
4362 out:
4363 	if (ret_val)
4364 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4365 
4366 	return ret_val;
4367 }
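
/*
 * Illustrative sketch (not compiled): the byte addressing used above.
 * NVM word 'w' starts at byte offset w * 2 (little endian), so its high
 * byte - where the 15:14 signature bits of word 0x13 live - sits at byte
 * offset w * 2 + 1.
 */
#if 0
static u32 nvm_word_high_byte(u32 word_offset)
{
	return (word_offset * 2 + 1);
}
#endif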
4368 
4369 /**
4370  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4371  *  @hw: pointer to the HW structure
4372  *
4373  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4374  *  If the bit is 0, the EEPROM had been modified but the checksum was not
4375  *  calculated, in which case we need to calculate the checksum and set bit 6.
4376  **/
4377 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4378 {
4379 	s32 ret_val;
4380 	u16 data;
4381 	u16 word;
4382 	u16 valid_csum_mask;
4383 
4384 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4385 
4386 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4387 	 * the checksum needs to be fixed.  This bit is an indication that
4388 	 * the NVM was prepared by OEM software and did not calculate
4389 	 * the checksum...a likely scenario.
4390 	 */
4391 	switch (hw->mac.type) {
4392 	case e1000_pch_lpt:
4393 	case e1000_pch_spt:
4394 	case e1000_pch_cnp:
4395 	case e1000_pch_tgp:
4396 		word = NVM_COMPAT;
4397 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4398 		break;
4399 	default:
4400 		word = NVM_FUTURE_INIT_WORD1;
4401 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4402 		break;
4403 	}
4404 
4405 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4406 	if (ret_val)
4407 		return ret_val;
4408 
4409 	if (!(data & valid_csum_mask)) {
4410 		data |= valid_csum_mask;
4411 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4412 		if (ret_val)
4413 			return ret_val;
4414 		ret_val = hw->nvm.ops.update(hw);
4415 		if (ret_val)
4416 			return ret_val;
4417 	}
4418 
4419 	return e1000_validate_nvm_checksum_generic(hw);
4420 }
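
/*
 * Illustrative sketch (not compiled): what the generic validate step
 * ultimately checks, assuming the usual header values NVM_CHECKSUM_REG
 * (0x3F) and NVM_SUM (0xBABA).  The first 0x40 NVM words, including the
 * checksum word itself, must sum to NVM_SUM.
 */
#if 0
static s32 nvm_checksum_example(struct e1000_hw *hw)
{
	u16 sum = 0, data, i;

	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		if (hw->nvm.ops.read(hw, i, 1, &data))
			return -E1000_ERR_NVM;
		sum += data;
	}
	return ((sum == (u16)NVM_SUM) ? E1000_SUCCESS : -E1000_ERR_NVM);
}
#endif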
4421 
4422 /**
4423  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4424  *  @hw: pointer to the HW structure
4425  *  @offset: The offset (in bytes) of the byte/word to write.
4426  *  @size: Size of data to write, 1=byte 2=word
4427  *  @data: The byte(s) to write to the NVM.
4428  *
4429  *  Writes one/two bytes to the NVM using the flash access registers.
4430  **/
4431 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4432 					  u8 size, u16 data)
4433 {
4434 	union ich8_hws_flash_status hsfsts;
4435 	union ich8_hws_flash_ctrl hsflctl;
4436 	u32 flash_linear_addr;
4437 	u32 flash_data = 0;
4438 	s32 ret_val;
4439 	u8 count = 0;
4440 
4441 	DEBUGFUNC("e1000_write_ich8_data");
4442 
4443 	if (hw->mac.type >= e1000_pch_spt) {
4444 		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4445 			return -E1000_ERR_NVM;
4446 	} else {
4447 		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4448 			return -E1000_ERR_NVM;
4449 	}
4450 
4451 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4452 			     hw->nvm.flash_base_addr);
4453 
4454 	do {
4455 		usec_delay(1);
4456 		/* Steps */
4457 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4458 		if (ret_val != E1000_SUCCESS)
4459 			break;
4460 		/* In SPT, this register is in LAN memory space, not
4461 		 * flash.  Therefore, only 32-bit access is supported.
4462 		 */
4463 		if (hw->mac.type >= e1000_pch_spt)
4464 			hsflctl.regval =
4465 			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4466 		else
4467 			hsflctl.regval =
4468 			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4469 
4470 		/* A dbcount of 0/1/3 selects a 1-, 2- or 4-byte transfer. */
4471 		hsflctl.hsf_ctrl.fldbcount = size - 1;
4472 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4473 		/* In SPT, this register is in LAN memory space,
4474 		 * not flash.  Therefore, only 32-bit access is
4475 		 * supported.
4476 		 */
4477 		if (hw->mac.type >= e1000_pch_spt)
4478 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4479 					      hsflctl.regval << 16);
4480 		else
4481 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4482 						hsflctl.regval);
4483 
4484 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4485 
4486 		if (size == 1)
4487 			flash_data = (u32)data & 0x00FF;
4488 		else
4489 			flash_data = (u32)data;
4490 
4491 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4492 
4493 		/* If FCERR is set, clear it and retry the whole
4494 		 * sequence a few more times; otherwise we are done.
4495 		 */
4496 		ret_val =
4497 		    e1000_flash_cycle_ich8lan(hw,
4498 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4499 		if (ret_val == E1000_SUCCESS)
4500 			break;
4501 
4502 		/* If we're here, then things are most likely
4503 		 * completely hosed, but if the error condition
4504 		 * is detected, it won't hurt to give it another
4505 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4506 		 */
4507 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4508 		if (hsfsts.hsf_status.flcerr)
4509 			/* Repeat for some time before giving up. */
4510 			continue;
4511 		if (!hsfsts.hsf_status.flcdone) {
4512 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4513 			break;
4514 		}
4515 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4516 
4517 	return ret_val;
4518 }
4519 
4520 /**
4521  *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4522  *  @hw: pointer to the HW structure
4523  *  @offset: The offset (in bytes) of the dword to write.
4524  *  @data: The 4 bytes to write to the NVM.
4525  *
4526  *  Writes a dword to the NVM using the flash access registers.
4527  **/
4528 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4529 					    u32 data)
4530 {
4531 	union ich8_hws_flash_status hsfsts;
4532 	union ich8_hws_flash_ctrl hsflctl;
4533 	u32 flash_linear_addr;
4534 	s32 ret_val;
4535 	u8 count = 0;
4536 
4537 	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4538 
4539 	if (hw->mac.type >= e1000_pch_spt) {
4540 		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4541 			return -E1000_ERR_NVM;
4542 	}
4543 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4544 			     hw->nvm.flash_base_addr);
4545 	do {
4546 		usec_delay(1);
4547 		/* Steps */
4548 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4549 		if (ret_val != E1000_SUCCESS)
4550 			break;
4551 
4552 		/* In SPT, this register is in LAN memory space, not
4553 		 * flash.  Therefore, only 32-bit access is supported.
4554 		 */
4555 		if (hw->mac.type >= e1000_pch_spt)
4556 			hsflctl.regval = E1000_READ_FLASH_REG(hw,
4557 							      ICH_FLASH_HSFSTS)
4558 					 >> 16;
4559 		else
4560 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4561 							      ICH_FLASH_HSFCTL);
4562 
4563 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4564 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4565 
4566 		/* In SPT, this register is in LAN memory space,
4567 		 * not flash.  Therefore, only 32-bit access is
4568 		 * supported.
4569 		 */
4570 		if (hw->mac.type >= e1000_pch_spt)
4571 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4572 					      hsflctl.regval << 16);
4573 		else
4574 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4575 						hsflctl.regval);
4576 
4577 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4578 
4579 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4580 
4581 		/* If FCERR is set, clear it and retry the whole
4582 		 * sequence a few more times; otherwise we are done.
4583 		 */
4584 		ret_val = e1000_flash_cycle_ich8lan(hw,
4585 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4586 
4587 		if (ret_val == E1000_SUCCESS)
4588 			break;
4589 
4590 		/* If we're here, then things are most likely
4591 		 * completely hosed, but if the error condition
4592 		 * is detected, it won't hurt to give it another
4593 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4594 		 */
4595 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4596 
4597 		if (hsfsts.hsf_status.flcerr)
4598 			/* Repeat for some time before giving up. */
4599 			continue;
4600 		if (!hsfsts.hsf_status.flcdone) {
4601 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4602 			break;
4603 		}
4604 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4605 
4606 	return ret_val;
4607 }
4608 
4609 /**
4610  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4611  *  @hw: pointer to the HW structure
4612  *  @offset: The index of the byte to write.
4613  *  @data: The byte to write to the NVM.
4614  *
4615  *  Writes a single byte to the NVM using the flash access registers.
4616  **/
4617 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4618 					  u8 data)
4619 {
4620 	u16 word = (u16)data;
4621 
4622 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4623 
4624 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4625 }
4626 
4627 /**
4628  *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4629  *  @hw: pointer to the HW structure
4630  *  @offset: The offset (in words) of the dword to write.
4631  *  @dword: The dword to write to the NVM.
4632  *
4633  *  Writes a single dword to the NVM using the flash access registers.
4634  *  Goes through a retry algorithm before giving up.
4635  **/
4636 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4637 						 u32 offset, u32 dword)
4638 {
4639 	s32 ret_val;
4640 	u16 program_retries;
4641 
4642 	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4643 
4644 	/* Must convert word offset into bytes. */
4645 	offset <<= 1;
4646 
4647 	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4648 
4649 	if (!ret_val)
4650 		return ret_val;
4651 	for (program_retries = 0; program_retries < 100; program_retries++) {
4652 		DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4653 		usec_delay(100);
4654 		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4655 		if (ret_val == E1000_SUCCESS)
4656 			break;
4657 	}
4658 	if (program_retries == 100)
4659 		return -E1000_ERR_NVM;
4660 
4661 	return E1000_SUCCESS;
4662 }
4663 
4664 /**
4665  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4666  *  @hw: pointer to the HW structure
4667  *  @offset: The offset of the byte to write.
4668  *  @byte: The byte to write to the NVM.
4669  *
4670  *  Writes a single byte to the NVM using the flash access registers.
4671  *  Goes through a retry algorithm before giving up.
4672  **/
4673 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4674 						u32 offset, u8 byte)
4675 {
4676 	s32 ret_val;
4677 	u16 program_retries;
4678 
4679 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4680 
4681 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4682 	if (!ret_val)
4683 		return ret_val;
4684 
4685 	for (program_retries = 0; program_retries < 100; program_retries++) {
4686 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4687 		usec_delay(100);
4688 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4689 		if (ret_val == E1000_SUCCESS)
4690 			break;
4691 	}
4692 	if (program_retries == 100)
4693 		return -E1000_ERR_NVM;
4694 
4695 	return E1000_SUCCESS;
4696 }
4697 
4698 /**
4699  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4700  *  @hw: pointer to the HW structure
4701  *  @bank: 0 for first bank, 1 for second bank, etc.
4702  *
4703  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4704  *  bank N begins at byte offset 4096 * N from the flash base address.
4705  **/
4706 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4707 {
4708 	struct e1000_nvm_info *nvm = &hw->nvm;
4709 	union ich8_hws_flash_status hsfsts;
4710 	union ich8_hws_flash_ctrl hsflctl;
4711 	u32 flash_linear_addr;
4712 	/* bank size is in 16bit words - adjust to bytes */
4713 	u32 flash_bank_size = nvm->flash_bank_size * 2;
4714 	s32 ret_val;
4715 	s32 count = 0;
4716 	s32 j, iteration, sector_size;
4717 
4718 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4719 
4720 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4721 
4722 	/* Determine HW Sector size: Read BERASE bits of hw flash status
4723 	 * register
4724 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4725 	 *     consecutive sectors.  The start index for the nth Hw sector
4726 	 *     can be calculated as = bank * 4096 + n * 256
4727 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4728 	 *     The start index for the nth Hw sector can be calculated
4729 	 *     as = bank * 4096
4730 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4731 	 *     (ich9 only, otherwise error condition)
4732 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4733 	 */
4734 	switch (hsfsts.hsf_status.berasesz) {
4735 	case 0:
4736 		/* Hw sector size 256 */
4737 		sector_size = ICH_FLASH_SEG_SIZE_256;
4738 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4739 		break;
4740 	case 1:
4741 		sector_size = ICH_FLASH_SEG_SIZE_4K;
4742 		iteration = 1;
4743 		break;
4744 	case 2:
4745 		sector_size = ICH_FLASH_SEG_SIZE_8K;
4746 		iteration = 1;
4747 		break;
4748 	case 3:
4749 		sector_size = ICH_FLASH_SEG_SIZE_64K;
4750 		iteration = 1;
4751 		break;
4752 	default:
4753 		return -E1000_ERR_NVM;
4754 	}
4755 
4756 	/* Start with the base address, then add the sector offset. */
4757 	flash_linear_addr = hw->nvm.flash_base_addr;
4758 	flash_linear_addr += (bank) ? flash_bank_size : 0;
4759 
4760 	for (j = 0; j < iteration; j++) {
4761 		do {
4762 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4763 
4764 			/* Steps */
4765 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4766 			if (ret_val)
4767 				return ret_val;
4768 
4769 			/* Write the value 11b (block erase) into the
4770 			 * Flash Cycle field of the hw flash control.
4771 			 */
4772 			if (hw->mac.type >= e1000_pch_spt)
4773 				hsflctl.regval =
4774 				    E1000_READ_FLASH_REG(hw,
4775 							 ICH_FLASH_HSFSTS)>>16;
4776 			else
4777 				hsflctl.regval =
4778 				    E1000_READ_FLASH_REG16(hw,
4779 							   ICH_FLASH_HSFCTL);
4780 
4781 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4782 			if (hw->mac.type >= e1000_pch_spt)
4783 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4784 						      hsflctl.regval << 16);
4785 			else
4786 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4787 							hsflctl.regval);
4788 
4789 			/* Write the last 24 bits of an index within the
4790 			 * block into the Flash Linear Address field.
4791 			 * Compute the address fresh each pass so neither
4792 			 * retries nor the sector loop accumulate offsets.
4793 			 */
4794 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4795 			    flash_linear_addr + (j * sector_size));
4796 
4797 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4798 			if (ret_val == E1000_SUCCESS)
4799 				break;
4800 
4801 			/* Check if FCERR is set to 1.  If 1,
4802 			 * clear it and try the whole sequence
4803 			 * a few more times else Done
4804 			 */
4805 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4806 						      ICH_FLASH_HSFSTS);
4807 			if (hsfsts.hsf_status.flcerr)
4808 				/* repeat for some time before giving up */
4809 				continue;
4810 			else if (!hsfsts.hsf_status.flcdone)
4811 				return ret_val;
4812 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4813 	}
4814 
4815 	return E1000_SUCCESS;
4816 }
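
/*
 * Illustrative sketch (not compiled): the erase addressing used above,
 * with the bank size converted from words to bytes.  The helper name is
 * hypothetical; sector j of bank 'bank' begins at:
 */
#if 0
static u32 erase_sector_addr(struct e1000_hw *hw, u32 bank, s32 j,
			     s32 sector_size)
{
	u32 bank_bytes = hw->nvm.flash_bank_size * 2;	/* words -> bytes */

	return (hw->nvm.flash_base_addr +
	    (bank ? bank_bytes : 0) + (u32)(j * sector_size));
}
#endif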
4817 
4818 /**
4819  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4820  *  @hw: pointer to the HW structure
4821  *  @data: Pointer to the LED settings
4822  *
4823  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4824  *  settings is all 0's or F's, set the LED default to a valid LED default
4825  *  setting.
4826  **/
4827 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4828 {
4829 	s32 ret_val;
4830 
4831 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4832 
4833 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4834 	if (ret_val) {
4835 		DEBUGOUT("NVM Read Error\n");
4836 		return ret_val;
4837 	}
4838 
4839 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4840 		*data = ID_LED_DEFAULT_ICH8LAN;
4841 
4842 	return E1000_SUCCESS;
4843 }
4844 
4845 /**
4846  *  e1000_id_led_init_pchlan - store LED configurations
4847  *  @hw: pointer to the HW structure
4848  *
4849  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4850  *  the PHY LED configuration register.
4851  *
4852  *  PCH also does not have an "always on" or "always off" mode which
4853  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4854  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4855  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4856  *  link based on logic in e1000_led_[on|off]_pchlan().
4857  **/
4858 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4859 {
4860 	struct e1000_mac_info *mac = &hw->mac;
4861 	s32 ret_val;
4862 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4863 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4864 	u16 data, i, temp, shift;
4865 
4866 	DEBUGFUNC("e1000_id_led_init_pchlan");
4867 
4868 	/* Get default ID LED modes */
4869 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4870 	if (ret_val)
4871 		return ret_val;
4872 
4873 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4874 	mac->ledctl_mode1 = mac->ledctl_default;
4875 	mac->ledctl_mode2 = mac->ledctl_default;
4876 
4877 	for (i = 0; i < 4; i++) {
4878 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4879 		shift = (i * 5);
4880 		switch (temp) {
4881 		case ID_LED_ON1_DEF2:
4882 		case ID_LED_ON1_ON2:
4883 		case ID_LED_ON1_OFF2:
4884 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4885 			mac->ledctl_mode1 |= (ledctl_on << shift);
4886 			break;
4887 		case ID_LED_OFF1_DEF2:
4888 		case ID_LED_OFF1_ON2:
4889 		case ID_LED_OFF1_OFF2:
4890 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4891 			mac->ledctl_mode1 |= (ledctl_off << shift);
4892 			break;
4893 		default:
4894 			/* Do nothing */
4895 			break;
4896 		}
4897 		switch (temp) {
4898 		case ID_LED_DEF1_ON2:
4899 		case ID_LED_ON1_ON2:
4900 		case ID_LED_OFF1_ON2:
4901 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4902 			mac->ledctl_mode2 |= (ledctl_on << shift);
4903 			break;
4904 		case ID_LED_DEF1_OFF2:
4905 		case ID_LED_ON1_OFF2:
4906 		case ID_LED_OFF1_OFF2:
4907 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4908 			mac->ledctl_mode2 |= (ledctl_off << shift);
4909 			break;
4910 		default:
4911 			/* Do nothing */
4912 			break;
4913 		}
4914 	}
4915 
4916 	return E1000_SUCCESS;
4917 }
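
/*
 * Illustrative sketch (not compiled): the field math used above.  The
 * ID-LED word from the NVM packs four 4-bit mode nibbles, one per LED,
 * extracted with "(data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK",
 * while each LED's field in the PHY LED configuration register is 5 bits
 * wide, hence the "i * 5" shift when inserting.  The helper name is
 * hypothetical.
 */
#if 0
static u32 led_field_set(u32 ledctl, u16 i, u32 mode)
{
	u16 shift = i * 5;	/* each PHY LED field is 5 bits wide */

	ledctl &= ~(E1000_PHY_LED0_MASK << shift);
	return (ledctl | (mode << shift));
}
#endif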
4918 
4919 /**
4920  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4921  *  @hw: pointer to the HW structure
4922  *
4923  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4924  *  register, so the bus width is hard coded.
4925  **/
4926 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4927 {
4928 	struct e1000_bus_info *bus = &hw->bus;
4929 	s32 ret_val;
4930 
4931 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4932 
4933 	ret_val = e1000_get_bus_info_pcie_generic(hw);
4934 
4935 	/* ICH devices are "PCI Express"-ish.  They have
4936 	 * a configuration space, but do not contain
4937 	 * PCI Express Capability registers, so bus width
4938 	 * must be hardcoded.
4939 	 */
4940 	if (bus->width == e1000_bus_width_unknown)
4941 		bus->width = e1000_bus_width_pcie_x1;
4942 
4943 	return ret_val;
4944 }
4945 
4946 /**
4947  *  e1000_reset_hw_ich8lan - Reset the hardware
4948  *  @hw: pointer to the HW structure
4949  *
4950  *  Does a full reset of the hardware which includes a reset of the PHY and
4951  *  MAC.
4952  **/
4953 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4954 {
4955 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4956 	u16 kum_cfg;
4957 	u32 ctrl, reg;
4958 	s32 ret_val;
4959 
4960 	DEBUGFUNC("e1000_reset_hw_ich8lan");
4961 
4962 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4963 	 * on the last TLP read/write transaction when MAC is reset.
4964 	 */
4965 	ret_val = e1000_disable_pcie_master_generic(hw);
4966 	if (ret_val)
4967 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4968 
4969 	DEBUGOUT("Masking off all interrupts\n");
4970 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4971 
4972 	/* Disable the Transmit and Receive units.  Then delay to allow
4973 	 * any pending transactions to complete before we hit the MAC
4974 	 * with the global reset.
4975 	 */
4976 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4977 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4978 	E1000_WRITE_FLUSH(hw);
4979 
4980 	msec_delay(10);
4981 
4982 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4983 	if (hw->mac.type == e1000_ich8lan) {
4984 		/* Set Tx and Rx buffer allocation to 8k apiece. */
4985 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4986 		/* Set Packet Buffer Size to 16k. */
4987 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4988 	}
4989 
4990 	if (hw->mac.type == e1000_pchlan) {
4991 		/* Save the NVM K1 bit setting */
4992 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4993 		if (ret_val)
4994 			return ret_val;
4995 
4996 		if (kum_cfg & E1000_NVM_K1_ENABLE)
4997 			dev_spec->nvm_k1_enabled = TRUE;
4998 		else
4999 			dev_spec->nvm_k1_enabled = FALSE;
5000 	}
5001 
5002 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5003 
5004 	if (!hw->phy.ops.check_reset_block(hw)) {
5005 		/* Full-chip reset requires MAC and PHY reset at the same
5006 		 * time to make sure the interface between MAC and the
5007 		 * external PHY is reset.
5008 		 */
5009 		ctrl |= E1000_CTRL_PHY_RST;
5010 
5011 		/* Gate automatic PHY configuration by hardware on
5012 		 * non-managed 82579
5013 		 */
5014 		if ((hw->mac.type == e1000_pch2lan) &&
5015 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5016 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
5017 	}
5018 	ret_val = e1000_acquire_swflag_ich8lan(hw);
5019 	DEBUGOUT("Issuing a global reset to ich8lan\n");
5020 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5021 	/* cannot issue a flush here because it hangs the hardware */
5022 	msec_delay(20);
5023 
5024 	/* Set Phy Config Counter to 50msec */
5025 	if (hw->mac.type == e1000_pch2lan) {
5026 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5027 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5028 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5029 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5030 	}
5031 
5032 	if (!ret_val)
5033 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
5034 
5035 	if (ctrl & E1000_CTRL_PHY_RST) {
5036 		ret_val = hw->phy.ops.get_cfg_done(hw);
5037 		if (ret_val)
5038 			return ret_val;
5039 
5040 		ret_val = e1000_post_phy_reset_ich8lan(hw);
5041 		if (ret_val)
5042 			return ret_val;
5043 	}
5044 
5045 	/* For PCH, this write will make sure that any noise
5046 	 * will be detected as a CRC error and be dropped rather than show up
5047 	 * as a bad packet to the DMA engine.
5048 	 */
5049 	if (hw->mac.type == e1000_pchlan)
5050 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5051 
5052 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5053 	E1000_READ_REG(hw, E1000_ICR);
5054 
5055 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
5056 	reg |= E1000_KABGTXD_BGSQLBIAS;
5057 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5058 
5059 	return E1000_SUCCESS;
5060 }
5061 
5062 /**
5063  *  e1000_init_hw_ich8lan - Initialize the hardware
5064  *  @hw: pointer to the HW structure
5065  *
5066  *  Prepares the hardware for transmit and receive by doing the following:
5067  *   - initialize hardware bits
5068  *   - initialize LED identification
5069  *   - setup receive address registers
5070  *   - setup flow control
5071  *   - setup transmit descriptors
5072  *   - clear statistics
5073  **/
5074 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5075 {
5076 	struct e1000_mac_info *mac = &hw->mac;
5077 	u32 ctrl_ext, txdctl, snoop;
5078 	s32 ret_val;
5079 	u16 i;
5080 
5081 	DEBUGFUNC("e1000_init_hw_ich8lan");
5082 
5083 	e1000_initialize_hw_bits_ich8lan(hw);
5084 
5085 	/* Initialize identification LED */
5086 	ret_val = mac->ops.id_led_init(hw);
5087 	/* An error is not fatal and we should not stop init due to this */
5088 	if (ret_val)
5089 		DEBUGOUT("Error initializing identification LED\n");
5090 
5091 	/* Setup the receive address. */
5092 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5093 
5094 	/* Zero out the Multicast HASH table */
5095 	DEBUGOUT("Zeroing the MTA\n");
5096 	for (i = 0; i < mac->mta_reg_count; i++)
5097 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5098 
5099 	/* The 82578 Rx buffer will stall if wakeup is enabled in both the
5100 	 * host and the ME.  Disable wakeup by clearing the host wakeup bit.
5101 	 * Reset the PHY after disabling host wakeup to reset the Rx buffer.
5102 	 */
5103 	if (hw->phy.type == e1000_phy_82578) {
5104 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5105 		i &= ~BM_WUC_HOST_WU_BIT;
5106 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5107 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
5108 		if (ret_val)
5109 			return ret_val;
5110 	}
5111 
5112 	/* Setup link and flow control */
5113 	ret_val = mac->ops.setup_link(hw);
5114 
5115 	/* Set the transmit descriptor write-back policy for both queues */
5116 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5117 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5118 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5119 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5120 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5121 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5122 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5123 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5124 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5125 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5126 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5127 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5128 
5129 	/* ICH8 has opposite polarity of no_snoop bits.
5130 	 * By default, we should use snoop behavior.
5131 	 */
5132 	if (mac->type == e1000_ich8lan)
5133 		snoop = PCIE_ICH8_SNOOP_ALL;
5134 	else
5135 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5136 	e1000_set_pcie_no_snoop_generic(hw, snoop);
5137 
5138 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5139 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5140 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5141 
5142 	/* Clear all of the statistics registers (clear on read).  It is
5143 	 * important that we do this after we have tried to establish link
5144 	 * because the symbol error count will increment wildly if there
5145 	 * is no link.
5146 	 */
5147 	e1000_clear_hw_cntrs_ich8lan(hw);
5148 
5149 	return ret_val;
5150 }
5151 
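/*
 * The TXDCTL programming above repeats one read-modify-write pattern for
 * queues 0 and 1.  A hedged sketch of the same pattern factored into a
 * helper; the helper name and the guard are hypothetical:
 */
#ifdef E1000_EXAMPLE_SKETCHES
static void e1000_example_txdctl_policy(struct e1000_hw *hw, int queue)
{
	u32 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(queue));

	/* Write back each descriptor as soon as it is done... */
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	/* ...and prefetch descriptors as aggressively as allowed. */
	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
	E1000_WRITE_REG(hw, E1000_TXDCTL(queue), txdctl);
}
#endif /* E1000_EXAMPLE_SKETCHES */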
5152 /**
5153  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5154  *  @hw: pointer to the HW structure
5155  *
5156  *  Sets/Clears required hardware bits necessary for correctly setting up the
5157  *  hardware for transmit and receive.
5158  **/
5159 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5160 {
5161 	u32 reg;
5162 
5163 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5164 
5165 	/* Extended Device Control */
5166 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5167 	reg |= (1 << 22);
5168 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5169 	if (hw->mac.type >= e1000_pchlan)
5170 		reg |= E1000_CTRL_EXT_PHYPDEN;
5171 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5172 
5173 	/* Transmit Descriptor Control 0 */
5174 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5175 	reg |= (1 << 22);
5176 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5177 
5178 	/* Transmit Descriptor Control 1 */
5179 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5180 	reg |= (1 << 22);
5181 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5182 
5183 	/* Transmit Arbitration Control 0 */
5184 	reg = E1000_READ_REG(hw, E1000_TARC(0));
5185 	if (hw->mac.type == e1000_ich8lan)
5186 		reg |= (1 << 28) | (1 << 29);
5187 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5188 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5189 
5190 	/* Transmit Arbitration Control 1 */
5191 	reg = E1000_READ_REG(hw, E1000_TARC(1));
5192 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5193 		reg &= ~(1 << 28);
5194 	else
5195 		reg |= (1 << 28);
5196 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5197 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5198 
5199 	/* Device Status */
5200 	if (hw->mac.type == e1000_ich8lan) {
5201 		reg = E1000_READ_REG(hw, E1000_STATUS);
5202 		reg &= ~(1UL << 31);
5203 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5204 	}
5205 
5206 	/* Work around a descriptor data corruption issue seen with NFS v2 UDP
5207 	 * traffic by simply disabling the NFS filtering capability.
5208 	 */
5209 	reg = E1000_READ_REG(hw, E1000_RFCTL);
5210 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5211 
5212 	/* Disable IPv6 extension header parsing because some malformed
5213 	 * IPv6 headers can hang the Rx.
5214 	 */
5215 	if (hw->mac.type == e1000_ich8lan)
5216 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5217 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5218 
5219 	/* Enable ECC on Lynxpoint */
5220 	if (hw->mac.type >= e1000_pch_lpt) {
5221 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5222 		reg |= E1000_PBECCSTS_ECC_ENABLE;
5223 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5224 
5225 		reg = E1000_READ_REG(hw, E1000_CTRL);
5226 		reg |= E1000_CTRL_MEHE;
5227 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5228 	}
5229 
5230 	return;
5231 }
5232 
5233 /**
5234  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5235  *  @hw: pointer to the HW structure
5236  *
5237  *  Determines which flow control settings to use, then configures flow
5238  *  control.  Calls the appropriate media-specific link configuration
5239  *  function.  Assuming the adapter has a valid link partner, a valid link
5240  *  should be established.  Assumes the hardware has previously been reset
5241  *  and the transmitter and receiver are not enabled.
5242  **/
5243 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5244 {
5245 	s32 ret_val;
5246 
5247 	DEBUGFUNC("e1000_setup_link_ich8lan");
5248 
5249 	if (hw->phy.ops.check_reset_block(hw))
5250 		return E1000_SUCCESS;
5251 
5252 	/* ICH parts do not have a word in the NVM to determine
5253 	 * the default flow control setting, so we explicitly
5254 	 * set it to full.
5255 	 */
5256 	if (hw->fc.requested_mode == e1000_fc_default)
5257 		hw->fc.requested_mode = e1000_fc_full;
5258 
5259 	/* Save off the requested flow control mode for use later.  Depending
5260 	 * on the link partner's capabilities, we may or may not use this mode.
5261 	 */
5262 	hw->fc.current_mode = hw->fc.requested_mode;
5263 
5264 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5265 		hw->fc.current_mode);
5266 
5267 	/* Continue to configure the copper link. */
5268 	ret_val = hw->mac.ops.setup_physical_interface(hw);
5269 	if (ret_val)
5270 		return ret_val;
5271 
5272 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5273 	if ((hw->phy.type == e1000_phy_82578) ||
5274 	    (hw->phy.type == e1000_phy_82579) ||
5275 	    (hw->phy.type == e1000_phy_i217) ||
5276 	    (hw->phy.type == e1000_phy_82577)) {
5277 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5278 
5279 		ret_val = hw->phy.ops.write_reg(hw,
5280 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5281 					     hw->fc.pause_time);
5282 		if (ret_val)
5283 			return ret_val;
5284 	}
5285 
5286 	return e1000_set_fc_watermarks_generic(hw);
5287 }
5288 
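/*
 * Hedged sketch of how a (hypothetical) caller selects a non-default flow
 * control mode: set fc.requested_mode before invoking setup_link, which
 * latches it into fc.current_mode and resolves it against the link partner.
 * The wrapper name and guard are illustrative only.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static s32 e1000_example_request_rx_pause(struct e1000_hw *hw)
{
	/* Advertise Rx pause only instead of the e1000_fc_full default. */
	hw->fc.requested_mode = e1000_fc_rx_pause;
	return hw->mac.ops.setup_link(hw);
}
#endif /* E1000_EXAMPLE_SKETCHES */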
5289 /**
5290  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5291  *  @hw: pointer to the HW structure
5292  *
5293  *  Configures the Kumeran interface to the PHY to wait the appropriate time
5294  *  when polling the PHY, then calls the generic setup_copper_link to finish
5295  *  configuring the copper link.
5296  **/
5297 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5298 {
5299 	u32 ctrl;
5300 	s32 ret_val;
5301 	u16 reg_data;
5302 
5303 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5304 
5305 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5306 	ctrl |= E1000_CTRL_SLU;
5307 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5308 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5309 
5310 	/* Set the MAC to wait the maximum time between each iteration
5311 	 * and increase the max iterations when polling the PHY;
5312 	 * this fixes erroneous timeouts at 10Mbps.
5313 	 */
5314 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5315 					       0xFFFF);
5316 	if (ret_val)
5317 		return ret_val;
5318 	ret_val = e1000_read_kmrn_reg_generic(hw,
5319 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5320 					      &reg_data);
5321 	if (ret_val)
5322 		return ret_val;
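	/* Assumed field layout: the low six bits of the in-band parameter
	 * word hold the polling iteration count, so 0x3F raises it to the
	 * maximum (see the comment above about erroneous 10Mbps timeouts).
	 */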
5323 	reg_data |= 0x3F;
5324 	ret_val = e1000_write_kmrn_reg_generic(hw,
5325 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5326 					       reg_data);
5327 	if (ret_val)
5328 		return ret_val;
5329 
5330 	switch (hw->phy.type) {
5331 	case e1000_phy_igp_3:
5332 		ret_val = e1000_copper_link_setup_igp(hw);
5333 		if (ret_val)
5334 			return ret_val;
5335 		break;
5336 	case e1000_phy_bm:
5337 	case e1000_phy_82578:
5338 		ret_val = e1000_copper_link_setup_m88(hw);
5339 		if (ret_val)
5340 			return ret_val;
5341 		break;
5342 	case e1000_phy_82577:
5343 	case e1000_phy_82579:
5344 		ret_val = e1000_copper_link_setup_82577(hw);
5345 		if (ret_val)
5346 			return ret_val;
5347 		break;
5348 	case e1000_phy_ife:
5349 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5350 					       &reg_data);
5351 		if (ret_val)
5352 			return ret_val;
5353 
5354 		reg_data &= ~IFE_PMC_AUTO_MDIX;
5355 
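		/* hw->phy.mdix selects crossover mode: 1 forces MDI, 2 forces
		 * MDI-X, and 0 (or any other value) selects auto-crossover.
		 */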
5356 		switch (hw->phy.mdix) {
5357 		case 1:
5358 			reg_data &= ~IFE_PMC_FORCE_MDIX;
5359 			break;
5360 		case 2:
5361 			reg_data |= IFE_PMC_FORCE_MDIX;
5362 			break;
5363 		case 0:
5364 		default:
5365 			reg_data |= IFE_PMC_AUTO_MDIX;
5366 			break;
5367 		}
5368 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5369 						reg_data);
5370 		if (ret_val)
5371 			return ret_val;
5372 		break;
5373 	default:
5374 		break;
5375 	}
5376 
5377 	return e1000_setup_copper_link_generic(hw);
5378 }
5379 
5380 /**
5381  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5382  *  @hw: pointer to the HW structure
5383  *
5384  *  Calls the PHY specific link setup function and then calls the
5385  *  generic setup_copper_link to finish configuring the link for
5386  *  Lynxpoint PCH devices
5387  **/
5388 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5389 {
5390 	u32 ctrl;
5391 	s32 ret_val;
5392 
5393 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5394 
5395 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5396 	ctrl |= E1000_CTRL_SLU;
5397 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5398 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5399 
5400 	ret_val = e1000_copper_link_setup_82577(hw);
5401 	if (ret_val)
5402 		return ret_val;
5403 
5404 	return e1000_setup_copper_link_generic(hw);
5405 }
5406 
5407 /**
5408  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5409  *  @hw: pointer to the HW structure
5410  *  @speed: pointer to store current link speed
5411  *  @duplex: pointer to store the current link duplex
5412  *
5413  *  Calls the generic get_speed_and_duplex to retrieve the current link
5414  *  information and then calls the Kumeran lock loss workaround for links at
5415  *  gigabit speeds.
5416  **/
5417 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5418 					  u16 *duplex)
5419 {
5420 	s32 ret_val;
5421 
5422 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5423 
5424 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5425 	if (ret_val)
5426 		return ret_val;
5427 
5428 	if ((hw->mac.type == e1000_ich8lan) &&
5429 	    (hw->phy.type == e1000_phy_igp_3) &&
5430 	    (*speed == SPEED_1000)) {
5431 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5432 	}
5433 
5434 	return ret_val;
5435 }
5436 
5437 /**
5438  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5439  *  @hw: pointer to the HW structure
5440  *
5441  *  Work-around for 82566 Kumeran PCS lock loss:
5442  *  On a link status change (i.e. PCI reset, speed change) when link is up
5443  *  at gigabit speed:
5444  *    0) if the workaround has been disabled, do nothing
5445  *    1) wait 1ms for the Kumeran link to come up
5446  *    2) check the Kumeran Diagnostic register PCS lock loss bit
5447  *    3) if not set, the link is locked (all is good); otherwise...
5448  *    4) reset the PHY
5449  *    5) repeat up to 10 times
5450  *  Note: this is only called for IGP3 copper when speed is 1 Gb/s.
5451  **/
5452 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5453 {
5454 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5455 	u32 phy_ctrl;
5456 	s32 ret_val;
5457 	u16 i, data;
5458 	bool link;
5459 
5460 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5461 
5462 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5463 		return E1000_SUCCESS;
5464 
5465 	/* Make sure link is up before proceeding; if not, just return.
5466 	 * Attempting this while the link is negotiating has fouled up
5467 	 * link stability in the past.
5468 	 */
5469 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5470 	if (!link)
5471 		return E1000_SUCCESS;
5472 
5473 	for (i = 0; i < 10; i++) {
5474 		/* read once to clear */
5475 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5476 		if (ret_val)
5477 			return ret_val;
5478 		/* and again to get new status */
5479 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5480 		if (ret_val)
5481 			return ret_val;
5482 
5483 		/* check for PCS lock */
5484 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5485 			return E1000_SUCCESS;
5486 
5487 		/* Issue PHY reset */
5488 		hw->phy.ops.reset(hw);
5489 		msec_delay_irq(5);
5490 	}
5491 	/* Disable GigE link negotiation */
5492 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5493 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5494 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5495 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5496 
5497 	/* Call gig speed drop workaround on Gig disable before accessing
5498 	 * any PHY registers
5499 	 */
5500 	e1000_gig_downshift_workaround_ich8lan(hw);
5501 
5502 	/* unable to acquire PCS lock */
5503 	return -E1000_ERR_PHY;
5504 }
5505 
5506 /**
5507  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5508  *  @hw: pointer to the HW structure
5509  *  @state: boolean value used to set the current Kumeran workaround state
5510  *
5511  *  If ICH8, set the current Kumeran workaround state
5512  *  (enabled = TRUE, disabled = FALSE).
5513  **/
5514 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5515 						 bool state)
5516 {
5517 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5518 
5519 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5520 
5521 	if (hw->mac.type != e1000_ich8lan) {
5522 		DEBUGOUT("Workaround applies to ICH8 only.\n");
5523 		return;
5524 	}
5525 
5526 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5527 
5528 	return;
5529 }
5530 
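/*
 * Usage sketch for the setter above (it is non-static, so external code can
 * call it); the wrapper name and guard are hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static void e1000_example_disable_kmrn_workaround(struct e1000_hw *hw)
{
	/* A no-op on anything other than ICH8; the setter checks mac.type. */
	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, FALSE);
}
#endif /* E1000_EXAMPLE_SKETCHES */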
5531 /**
5532  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5533  *  @hw: pointer to the HW structure
5534  *
5535  *  Workaround for 82566 power-down on D3 entry:
5536  *    1) disable gigabit link
5537  *    2) write VR power-down enable
5538  *    3) read it back
5539  *  Continue if successful, else issue LCD reset and repeat
5540  **/
5541 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5542 {
5543 	u32 reg;
5544 	u16 data;
5545 	u8  retry = 0;
5546 
5547 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5548 
5549 	if (hw->phy.type != e1000_phy_igp_3)
5550 		return;
5551 
5552 	/* Try the workaround twice (if needed) */
5553 	do {
5554 		/* Disable link */
5555 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5556 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5557 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5558 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5559 
5560 		/* Call gig speed drop workaround on Gig disable before
5561 		 * accessing any PHY registers
5562 		 */
5563 		if (hw->mac.type == e1000_ich8lan)
5564 			e1000_gig_downshift_workaround_ich8lan(hw);
5565 
5566 		/* Write VR power-down enable */
5567 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5568 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5569 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5570 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5571 
5572 		/* Read it back and test */
5573 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5574 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5575 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5576 			break;
5577 
5578 		/* Issue PHY reset and repeat at most one more time */
5579 		reg = E1000_READ_REG(hw, E1000_CTRL);
5580 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5581 		retry++;
5582 	} while (retry);
5583 }
5584 
5585 /**
5586  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5587  *  @hw: pointer to the HW structure
5588  *
5589  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5590  *  LPLU, Gig disable, MDIC PHY reset):
5591  *    1) Set Kumeran Near-end loopback
5592  *    2) Clear Kumeran Near-end loopback
5593  *  Should only be called for ICH8[m] devices with any 1G PHY.
5594  **/
5595 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5596 {
5597 	s32 ret_val;
5598 	u16 reg_data;
5599 
5600 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5601 
5602 	if ((hw->mac.type != e1000_ich8lan) ||
5603 	    (hw->phy.type == e1000_phy_ife))
5604 		return;
5605 
5606 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5607 					      &reg_data);
5608 	if (ret_val)
5609 		return;
5610 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5611 	ret_val = e1000_write_kmrn_reg_generic(hw,
5612 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5613 					       reg_data);
5614 	if (ret_val)
5615 		return;
5616 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5617 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5618 				     reg_data);
5619 }
5620 
5621 /**
5622  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5623  *  @hw: pointer to the HW structure
5624  *
5625  *  During S0 to Sx transition, it is possible the link remains at gig
5626  *  instead of negotiating to a lower speed.  Before going to Sx, set
5627  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5628  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5629  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5630  *  needs to be written.
5631  *  Parts that support (and are linked to a partner which support) EEE in
5632  *  Parts that support (and are linked to a partner which supports) EEE in
5633  *  than 10Mbps w/o EEE.
5634  **/
5635 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5636 {
5637 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5638 	u32 phy_ctrl;
5639 	s32 ret_val;
5640 
5641 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5642 
5643 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5644 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5645 
5646 	if (hw->phy.type == e1000_phy_i217) {
5647 		u16 phy_reg, device_id = hw->device_id;
5648 
5649 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5650 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5651 		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5652 		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5653 		    (hw->mac.type >= e1000_pch_spt)) {
5654 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5655 
5656 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5657 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5658 		}
5659 
5660 		ret_val = hw->phy.ops.acquire(hw);
5661 		if (ret_val)
5662 			goto out;
5663 
5664 		if (!dev_spec->eee_disable) {
5665 			u16 eee_advert;
5666 
5667 			ret_val =
5668 			    e1000_read_emi_reg_locked(hw,
5669 						      I217_EEE_ADVERTISEMENT,
5670 						      &eee_advert);
5671 			if (ret_val)
5672 				goto release;
5673 
5674 			/* Disable LPLU if both link partners support 100BaseT
5675 			 * EEE and 100Full is advertised on both ends of the
5676 			 * link, and enable Auto Enable LPI since there will
5677 			 * be no driver to enable LPI while in Sx.
5678 			 */
5679 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5680 			    (dev_spec->eee_lp_ability &
5681 			     I82579_EEE_100_SUPPORTED) &&
5682 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5683 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5684 					      E1000_PHY_CTRL_NOND0A_LPLU);
5685 
5686 				/* Set Auto Enable LPI after link up */
5687 				hw->phy.ops.read_reg_locked(hw,
5688 							    I217_LPI_GPIO_CTRL,
5689 							    &phy_reg);
5690 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5691 				hw->phy.ops.write_reg_locked(hw,
5692 							     I217_LPI_GPIO_CTRL,
5693 							     phy_reg);
5694 			}
5695 		}
5696 
5697 		/* For i217 Intel Rapid Start Technology support,
5698 		 * when the system is going into Sx and no manageability engine
5699 		 * is present, the driver must configure proxy to reset only on
5700 		 * power good.  LPI (Low Power Idle) state must also reset only
5701 		 * on power good, as well as the MTA (Multicast table array).
5702 		 * The SMBus release must also be disabled on LCD reset.
5703 		 */
5704 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5705 		      E1000_ICH_FWSM_FW_VALID)) {
5706 			/* Enable proxy to reset only on power good. */
5707 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5708 						    &phy_reg);
5709 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5710 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5711 						     phy_reg);
5712 
5713 			/* Set the LPI (EEE) enable bit to reset only on
5714 			 * power good.
5715 			 */
5716 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5717 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5718 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5719 
5720 			/* Disable the SMB release on LCD reset. */
5721 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5722 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5723 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5724 		}
5725 
5726 		/* Enable MTA to reset for Intel Rapid Start Technology
5727 		 * Support
5728 		 */
5729 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5730 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5731 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5732 
5733 release:
5734 		hw->phy.ops.release(hw);
5735 	}
5736 out:
5737 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5738 
5739 	if (hw->mac.type == e1000_ich8lan)
5740 		e1000_gig_downshift_workaround_ich8lan(hw);
5741 
5742 	if (hw->mac.type >= e1000_pchlan) {
5743 		e1000_oem_bits_config_ich8lan(hw, FALSE);
5744 
5745 		/* Reset PHY to activate OEM bits on 82577/8 */
5746 		if (hw->mac.type == e1000_pchlan)
5747 			e1000_phy_hw_reset_generic(hw);
5748 
5749 		ret_val = hw->phy.ops.acquire(hw);
5750 		if (ret_val)
5751 			return;
5752 		e1000_write_smbus_addr(hw);
5753 		hw->phy.ops.release(hw);
5754 	}
5755 
5756 	return;
5757 }
5758 
5759 /**
5760  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5761  *  @hw: pointer to the HW structure
5762  *
5763  *  During Sx to S0 transitions on non-managed devices or managed devices
5764  *  on which PHY resets are not blocked, if the PHY registers cannot be
5765  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5766  *  the PHY.
5767  *  On i217, setup Intel Rapid Start Technology.
5768  **/
5769 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5770 {
5771 	s32 ret_val;
5772 
5773 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5774 	if (hw->mac.type < e1000_pch2lan)
5775 		return E1000_SUCCESS;
5776 
5777 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5778 	if (ret_val) {
5779 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5780 		return ret_val;
5781 	}
5782 
5783 	/* For i217 Intel Rapid Start Technology support, when the system
5784 	 * is transitioning from Sx and no manageability engine is present,
5785 	 * configure SMBus to restore on reset, disable proxy, and enable
5786 	 * the reset on MTA (Multicast table array).
5787 	 */
5788 	if (hw->phy.type == e1000_phy_i217) {
5789 		u16 phy_reg;
5790 
5791 		ret_val = hw->phy.ops.acquire(hw);
5792 		if (ret_val) {
5793 			DEBUGOUT("Failed to setup iRST\n");
5794 			return ret_val;
5795 		}
5796 
5797 		/* Clear Auto Enable LPI after link up */
5798 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5799 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5800 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5801 
5802 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5803 		    E1000_ICH_FWSM_FW_VALID)) {
5804 			/* Restore clear on SMB if no manageability engine
5805 			 * is present
5806 			 */
5807 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5808 							      &phy_reg);
5809 			if (ret_val)
5810 				goto release;
5811 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5812 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5813 
5814 			/* Disable Proxy */
5815 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5816 		}
5817 		/* Enable reset on MTA */
5818 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5819 						      &phy_reg);
5820 		if (ret_val)
5821 			goto release;
5822 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5823 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5824 release:
5825 		if (ret_val)
5826 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5827 		hw->phy.ops.release(hw);
5828 		return ret_val;
5829 	}
5830 	return E1000_SUCCESS;
5831 }
5832 
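/*
 * Hedged sketch of how the suspend/resume workaround routines above pair up
 * across a power transition; the wrapper and guard are hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static u32 e1000_example_sx_cycle(struct e1000_hw *hw)
{
	/* Before entering Sx: gig disable, OEM bits, LPLU/EEE policy. */
	e1000_suspend_workarounds_ich8lan(hw);

	/* ...system sleeps in Sx, then wakes... */

	/* Back in S0: recover PHY access and undo the iRST configuration. */
	return e1000_resume_workarounds_pchlan(hw);
}
#endif /* E1000_EXAMPLE_SKETCHES */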
5833 /**
5834  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5835  *  @hw: pointer to the HW structure
5836  *
5837  *  Return the LED back to the default configuration.
5838  **/
5839 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5840 {
5841 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5842 
5843 	if (hw->phy.type == e1000_phy_ife)
5844 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5845 					     0);
5846 
5847 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5848 	return E1000_SUCCESS;
5849 }
5850 
5851 /**
5852  *  e1000_led_on_ich8lan - Turn LEDs on
5853  *  @hw: pointer to the HW structure
5854  *
5855  *  Turn on the LEDs.
5856  **/
5857 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5858 {
5859 	DEBUGFUNC("e1000_led_on_ich8lan");
5860 
5861 	if (hw->phy.type == e1000_phy_ife)
5862 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5863 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5864 
5865 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5866 	return E1000_SUCCESS;
5867 }
5868 
5869 /**
5870  *  e1000_led_off_ich8lan - Turn LEDs off
5871  *  @hw: pointer to the HW structure
5872  *
5873  *  Turn off the LEDs.
5874  **/
5875 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5876 {
5877 	DEBUGFUNC("e1000_led_off_ich8lan");
5878 
5879 	if (hw->phy.type == e1000_phy_ife)
5880 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5881 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5882 
5883 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5884 	return E1000_SUCCESS;
5885 }
5886 
5887 /**
5888  *  e1000_setup_led_pchlan - Configures SW controllable LED
5889  *  @hw: pointer to the HW structure
5890  *
5891  *  This prepares the SW controllable LED for use.
5892  **/
5893 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5894 {
5895 	DEBUGFUNC("e1000_setup_led_pchlan");
5896 
5897 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5898 				     (u16)hw->mac.ledctl_mode1);
5899 }
5900 
5901 /**
5902  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5903  *  @hw: pointer to the HW structure
5904  *
5905  *  Return the LED back to the default configuration.
5906  **/
5907 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5908 {
5909 	DEBUGFUNC("e1000_cleanup_led_pchlan");
5910 
5911 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5912 				     (u16)hw->mac.ledctl_default);
5913 }
5914 
5915 /**
5916  *  e1000_led_on_pchlan - Turn LEDs on
5917  *  @hw: pointer to the HW structure
5918  *
5919  *  Turn on the LEDs.
5920  **/
5921 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5922 {
5923 	u16 data = (u16)hw->mac.ledctl_mode2;
5924 	u32 i, led;
5925 
5926 	DEBUGFUNC("e1000_led_on_pchlan");
5927 
5928 	/* If no link, then turn LED on by setting the invert bit
5929 	 * for each LED whose mode is "link_up" in ledctl_mode2.
5930 	 */
5931 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5932 		for (i = 0; i < 3; i++) {
5933 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5934 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5935 			    E1000_LEDCTL_MODE_LINK_UP)
5936 				continue;
5937 			if (led & E1000_PHY_LED0_IVRT)
5938 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5939 			else
5940 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5941 		}
5942 	}
5943 
5944 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5945 }
5946 
5947 /**
5948  *  e1000_led_off_pchlan - Turn LEDs off
5949  *  @hw: pointer to the HW structure
5950  *
5951  *  Turn off the LEDs.
5952  **/
5953 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5954 {
5955 	u16 data = (u16)hw->mac.ledctl_mode1;
5956 	u32 i, led;
5957 
5958 	DEBUGFUNC("e1000_led_off_pchlan");
5959 
5960 	/* If no link, then turn LED off by clearing the invert bit
5961 	 * for each LED whose mode is "link_up" in ledctl_mode1.
5962 	 */
5963 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5964 		for (i = 0; i < 3; i++) {
5965 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5966 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5967 			    E1000_LEDCTL_MODE_LINK_UP)
5968 				continue;
5969 			if (led & E1000_PHY_LED0_IVRT)
5970 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5971 			else
5972 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5973 		}
5974 	}
5975 
5976 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5977 }
5978 
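/*
 * The loops in the two LED routines above treat the HV_LED_CONFIG value as
 * three 5-bit LED fields.  A sketch of extracting one field, using only the
 * masks the code above already relies on; the helper name is hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static u16 e1000_example_led_field(u16 ledctl, u32 i)
{
	/* Field i occupies bits [i*5 .. i*5+4]; its mode bits say when the
	 * LED lights and E1000_PHY_LED0_IVRT inverts that behavior.
	 */
	return (u16)((ledctl >> (i * 5)) & E1000_PHY_LED0_MASK);
}
#endif /* E1000_EXAMPLE_SKETCHES */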
5979 /**
5980  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5981  *  @hw: pointer to the HW structure
5982  *
5983  *  Read appropriate register for the config done bit for completion status
5984  *  and configure the PHY through s/w for EEPROM-less parts.
5985  *
5986  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5987  *  config done bit, so only an error is logged and init continues.  If we
5988  *  were to return an error, EEPROM-less silicon would not be able to be
5989  *  reset or to change link.
5990  **/
5991 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5992 {
5993 	s32 ret_val = E1000_SUCCESS;
5994 	u32 bank = 0;
5995 	u32 status;
5996 
5997 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5998 
5999 	e1000_get_cfg_done_generic(hw);
6000 
6001 	/* Wait for indication from h/w that it has completed basic config */
6002 	if (hw->mac.type >= e1000_ich10lan) {
6003 		e1000_lan_init_done_ich8lan(hw);
6004 	} else {
6005 		ret_val = e1000_get_auto_rd_done_generic(hw);
6006 		if (ret_val) {
6007 			/* When the auto config read does not complete, do not
6008 			 * return an error; this can happen when there is no
6009 			 * EEPROM, and an error here would prevent getting link.
6010 			 */
6011 			DEBUGOUT("Auto Read Done did not complete\n");
6012 			ret_val = E1000_SUCCESS;
6013 		}
6014 	}
6015 
6016 	/* Clear PHY Reset Asserted bit */
6017 	status = E1000_READ_REG(hw, E1000_STATUS);
6018 	if (status & E1000_STATUS_PHYRA)
6019 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
6020 	else
6021 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6022 
6023 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
6024 	if (hw->mac.type <= e1000_ich9lan) {
6025 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6026 		    (hw->phy.type == e1000_phy_igp_3)) {
6027 			e1000_phy_init_script_igp3(hw);
6028 		}
6029 	} else {
6030 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6031 			/* Maybe we should do a basic PHY config */
6032 			DEBUGOUT("EEPROM not present\n");
6033 			ret_val = -E1000_ERR_CONFIG;
6034 		}
6035 	}
6036 
6037 	return ret_val;
6038 }
6039 
6040 /**
6041  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6042  * @hw: pointer to the HW structure
6043  *
6044  * In the case of a PHY power-down to save power, to turn off link during a
6045  * driver unload, or when wake-on-LAN is not enabled, remove the link.
6046  **/
6047 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6048 {
6049 	/* If the management interface is not enabled, then power down */
6050 	if (!(hw->mac.ops.check_mng_mode(hw) ||
6051 	      hw->phy.ops.check_reset_block(hw)))
6052 		e1000_power_down_phy_copper(hw);
6053 
6054 	return;
6055 }
6056 
6057 /**
6058  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6059  *  @hw: pointer to the HW structure
6060  *
6061  *  Clears hardware counters specific to the silicon family and calls
6062  *  clear_hw_cntrs_generic to clear all general purpose counters.
6063  **/
6064 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6065 {
6066 	u16 phy_data;
6067 	s32 ret_val;
6068 
6069 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6070 
6071 	e1000_clear_hw_cntrs_base_generic(hw);
6072 
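	/* The counters below are clear-on-read; reading each register (and
	 * discarding the value) is what zeroes it.
	 */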
6073 	E1000_READ_REG(hw, E1000_ALGNERRC);
6074 	E1000_READ_REG(hw, E1000_RXERRC);
6075 	E1000_READ_REG(hw, E1000_TNCRS);
6076 	E1000_READ_REG(hw, E1000_CEXTERR);
6077 	E1000_READ_REG(hw, E1000_TSCTC);
6078 	E1000_READ_REG(hw, E1000_TSCTFC);
6079 
6080 	E1000_READ_REG(hw, E1000_MGTPRC);
6081 	E1000_READ_REG(hw, E1000_MGTPDC);
6082 	E1000_READ_REG(hw, E1000_MGTPTC);
6083 
6084 	E1000_READ_REG(hw, E1000_IAC);
6085 	E1000_READ_REG(hw, E1000_ICRXOC);
6086 
6087 	/* Clear PHY statistics registers */
6088 	if ((hw->phy.type == e1000_phy_82578) ||
6089 	    (hw->phy.type == e1000_phy_82579) ||
6090 	    (hw->phy.type == e1000_phy_i217) ||
6091 	    (hw->phy.type == e1000_phy_82577)) {
6092 		ret_val = hw->phy.ops.acquire(hw);
6093 		if (ret_val)
6094 			return;
6095 		ret_val = hw->phy.ops.set_page(hw,
6096 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
6097 		if (ret_val)
6098 			goto release;
6099 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6100 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6101 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6102 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6103 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6104 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6105 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6106 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6107 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6108 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6109 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6110 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6111 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6112 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6113 release:
6114 		hw->phy.ops.release(hw);
6115 	}
6116 }
6117 
6118