/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000_api.h"

static s32  e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
			       u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					    u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					   u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					  u32 offset, u8 *data);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 *data);
static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					    u32 *data);
static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
					   u32 offset, u32 *data);
static s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
					     u32 offset, u32 data);
static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
						  u32 offset, u32 dword);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					  u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						 u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};
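
/* Usage sketch (hedged): the union lets the driver read the 16-bit HSFSTS
 * register once and then inspect individual fields by name, e.g.:
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
 *		; // last flash cycle completed without error
 *
 * This mirrors how the flash-cycle helpers later in this file use the
 * register; E1000_READ_FLASH_REG16 and ICH_FLASH_HSFSTS are the accessor
 * and offset this driver defines elsewhere.
 */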

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 7:0 GbE region Read Access */
		u32 grwa:8; /* 15:8 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u32 regval;
};

/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with the known
 *  ID, otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}
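
	/* Example (hedged): for the BM PHY listed later in this file,
	 * PHY_ID1 = 0x0141 and PHY_ID2 = 0x0CB1 would combine to
	 * phy_id = 0x01410CB0 (BME1000_E_PHY_ID) with revision 1, since
	 * PHY_REVISION_MASK keeps the upper bits of PHY_ID2 and the low
	 * bits carry the revision.
	 */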

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return false;
out:
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Only unforce SMBus if ME is not active */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		}
	}

	return true;
}

/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		msec_delay(50);
	} else {
		u16 count = 20;

		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}

/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	ret_val = e1000_disable_ulp_lpt_lp(hw, true);
	if (ret_val)
		ERROR_REPORT("Failed to disable ULP\n");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
	case e1000_pch_mtp:
	case e1000_pch_ptp:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msec_delay(50);

		/* FALLTHROUGH */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* FALLTHROUGH */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {
		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000_BLK_PHY_RESET, as this is the condition the
		 * PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.set_page	= e1000_set_page_igp;
	phy->ops.read_reg	= e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg	= e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* FALLTHROUGH */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_pch_tgp:
		case e1000_pch_adp:
		case e1000_pch_mtp:
		case e1000_pch_ptp:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg	= e1000_read_phy_reg_igp;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg	= e1000_write_phy_reg_igp;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg  = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type >= e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		nvm_size =
		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			DEBUGOUT("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
				       << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	/* Function Pointers */
	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
	nvm->ops.release	= e1000_release_nvm_ich8lan;
	if (hw->mac.type >= e1000_pch_spt) {
		nvm->ops.read	= e1000_read_nvm_spt;
		nvm->ops.update	= e1000_update_nvm_checksum_spt;
	} else {
		nvm->ops.read	= e1000_read_nvm_ich8lan;
		nvm->ops.update	= e1000_update_nvm_checksum_ich8lan;
	}
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write		= e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}
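
/* Worked example (hedged, assuming NVM_SIZE_MULTIPLIER is 4096 bytes for
 * this family): on SPT, if STRAP bits 5:1 read back as 7, then
 * nvm_size = (7 + 1) * 4096 = 32768 bytes.  Each of the two NVM banks is
 * half of that, so flash_bank_size = 16384 / sizeof(u16) = 8192 words.
 */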

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* FALLTHROUGH */
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
	case e1000_pch_mtp:
	case e1000_pch_ptp:
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
		/* FALLTHROUGH */
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if (mac->type >= e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
			e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	return E1000_SUCCESS;
}

/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_emi_reg_locked");

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
						      data);
	else
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       *data);

	return ret_val;
}

/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	DEBUGFUNC("e1000_read_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, data, true);
}

/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	DEBUGFUNC("e1000_write_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}
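
/* Usage sketch (hedged): callers hold the SW/FW/HW semaphore around these
 * helpers, e.g.:
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *	hw->phy.ops.release(hw);
 *
 * e1000_set_eee_pchlan() below follows exactly this pattern.
 */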

/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until then, otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	DEBUGFUNC("e1000_set_eee_pchlan");

	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return E1000_SUCCESS;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		if ((hw->phy.revision > 5) || !link ||
		    ((status & E1000_STATUS_SPEED_100) &&
		     (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

static u64 e1000_ltr2ns(u16 ltr)
{
	u32 value, scale;

	/* Determine the latency in nsec based on the LTR value & scale */
	value = ltr & E1000_LTRV_VALUE_MASK;
	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

	return value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
}

/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  setting the SEND bit to send an Intel On-chip System Fabric sideband
 *  (IOSF-SB) message to the PMC.
 *
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode. This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}

/**
 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 *  @hw: pointer to the HW structure
 *  @itr: interrupt throttling rate
 *
 *  Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
	u32 svcr;
	s32 timer;

	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;

	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;
	}

	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
}

/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;
	u16 oem_reg = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;
		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;
			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			 (E1000_READ_REG(hw, E1000_FEXT) &
			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			 i * 50);
		if (!(E1000_READ_REG(hw, E1000_FEXT) &
		    E1000_FEXT_PHY_CABLE_DISCONNECTED))
			return 0;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
	 * LPLU and disable Gig speed when entering ULP
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
						       &oem_reg);
		if (ret_val)
			goto release;

		phy_reg = oem_reg;
		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							phy_reg);
		if (ret_val)
			goto release;
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
		else
			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							oem_reg);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @force: boolean indicating whether or not to force disabling ULP
 *
 *  Un-configure ULP mode when link is up, the system is transitioned from
 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 *  system, poll for an indication from ME that ULP has been un-configured.
 *  If not on an ME enabled system, un-configure the ULP mode by software.
 *
 *  During nominal operation, this function is called when link is acquired
 *  to disable ULP mode (force=false); otherwise, for example when unloading
 *  the driver or during Sx->S0 transitions, this is called with force=true
 *  to forcibly disable ULP.
 */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u8 ulp_exit_timeout = 30;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		if (hw->mac.type == e1000_pch_cnp)
			ulp_exit_timeout = 100;

		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == ulp_exit_timeout) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}
1552 
1553 /**
1554  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1555  *  @hw: pointer to the HW structure
1556  *
1557  *  Checks to see of the link status of the hardware has changed.  If a
1558  *  change in link status has been detected, then we read the PHY registers
1559  *  to get the current speed/duplex if link exists.
1560  **/
1561 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1562 {
1563 	struct e1000_mac_info *mac = &hw->mac;
1564 	s32 ret_val, tipg_reg = 0;
1565 	u16 emi_addr, emi_val = 0;
1566 	bool link;
1567 	u16 phy_reg;
1568 
1569 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1570 
1571 	/* We only want to go out to the PHY registers to see if Auto-Neg
1572 	 * has completed and/or if our link status has changed.  The
1573 	 * get_link_status flag is set upon receiving a Link Status
1574 	 * Change or Rx Sequence Error interrupt.
1575 	 */
1576 	if (!mac->get_link_status)
1577 		return E1000_SUCCESS;
1578 
1579 	/* First we want to see if the MII Status Register reports
1580 	 * link.  If so, then we want to get the current speed/duplex
1581 	 * of the PHY.
1582 	 */
1583 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1584 	if (ret_val)
1585 		return ret_val;
1586 
1587 	if (hw->mac.type == e1000_pchlan) {
1588 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1589 		if (ret_val)
1590 			return ret_val;
1591 	}
1592 
1593 	/* When connected at 10Mbps half-duplex, some parts are excessively
1594 	 * aggressive, resulting in many collisions. To avoid this, increase
1595 	 * the IPG and reduce Rx latency in the PHY.
1596 	 */
1597 	if ((hw->mac.type >= e1000_pch2lan) && link) {
1598 		u16 speed, duplex;
1599 
1600 		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1601 		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1602 		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1603 
1604 		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1605 			tipg_reg |= 0xFF;
1606 			/* Reduce Rx latency in analog PHY */
1607 			emi_val = 0;
1608 		} else if (hw->mac.type >= e1000_pch_spt &&
1609 			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
1610 			tipg_reg |= 0xC;
1611 			emi_val = 1;
1612 		} else {
1613 			/* Roll back to the default values */
1614 			tipg_reg |= 0x08;
1615 			emi_val = 1;
1616 		}
1617 
1618 		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1619 
1620 		ret_val = hw->phy.ops.acquire(hw);
1621 		if (ret_val)
1622 			return ret_val;
1623 
1624 		if (hw->mac.type == e1000_pch2lan)
1625 			emi_addr = I82579_RX_CONFIG;
1626 		else
1627 			emi_addr = I217_RX_CONFIG;
1628 		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1629 
1630 
1631 		if (hw->mac.type >= e1000_pch_lpt) {
1632 			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1633 						    &phy_reg);
1634 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1635 			if (speed == SPEED_100 || speed == SPEED_10)
1636 				phy_reg |= 0x3E8;
1637 			else
1638 				phy_reg |= 0xFA;
1639 			hw->phy.ops.write_reg_locked(hw,
1640 						     I217_PLL_CLOCK_GATE_REG,
1641 						     phy_reg);
1642 
1643 			if (speed == SPEED_1000) {
1644 				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1645 							    &phy_reg);
1646 
1647 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1648 
1649 				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1650 							     phy_reg);
1651 			}
1652 		}
1653 		hw->phy.ops.release(hw);
1654 
1655 		if (ret_val)
1656 			return ret_val;
1657 
1658 		if (hw->mac.type >= e1000_pch_spt) {
1659 			u16 data;
1660 			u16 ptr_gap;
1661 
1662 			if (speed == SPEED_1000) {
1663 				ret_val = hw->phy.ops.acquire(hw);
1664 				if (ret_val)
1665 					return ret_val;
1666 
1667 				ret_val = hw->phy.ops.read_reg_locked(hw,
1668 							      PHY_REG(776, 20),
1669 							      &data);
1670 				if (ret_val) {
1671 					hw->phy.ops.release(hw);
1672 					return ret_val;
1673 				}
1674 
1675 				ptr_gap = (data & (0x3FF << 2)) >> 2;
1676 				if (ptr_gap < 0x18) {
1677 					data &= ~(0x3FF << 2);
1678 					data |= (0x18 << 2);
1679 					ret_val =
1680 						hw->phy.ops.write_reg_locked(hw,
1681 							PHY_REG(776, 20), data);
1682 				}
1683 				hw->phy.ops.release(hw);
1684 				if (ret_val)
1685 					return ret_val;
1686 			} else {
1687 				ret_val = hw->phy.ops.acquire(hw);
1688 				if (ret_val)
1689 					return ret_val;
1690 
1691 				ret_val = hw->phy.ops.write_reg_locked(hw,
1692 							     PHY_REG(776, 20),
1693 							     0xC023);
1694 				hw->phy.ops.release(hw);
1695 				if (ret_val)
1696 					return ret_val;
1697 
1698 			}
1699 		}
1700 	}
1701 
1702 	/* I217 Packet Loss issue:
1703 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1704 	 * on power up.
1705 	 * Set the Beacon Duration for I217 to 8 usec
1706 	 */
1707 	if (hw->mac.type >= e1000_pch_lpt) {
1708 		u32 mac_reg;
1709 
1710 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1711 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1712 		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1713 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1714 	}
1715 
1716 	/* Work around the I218 hang issue */
1717 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1718 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1719 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1720 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1721 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1722 		if (ret_val)
1723 			return ret_val;
1724 	}
1725 	if (hw->mac.type >= e1000_pch_lpt) {
1726 		/* Set platform power management values for
1727 		 * Latency Tolerance Reporting (LTR)
1728 		 * Optimized Buffer Flush/Fill (OBFF)
1729 		 */
1730 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1731 		if (ret_val)
1732 			return ret_val;
1733 	}
1734 	/* Clear link partner's EEE ability */
1735 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1736 
1737 	if (hw->mac.type >= e1000_pch_lpt) {
1738 		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1739 
1740 		if (hw->mac.type == e1000_pch_spt) {
1741 			/* FEXTNVM6 K1-off workaround - for SPT only */
1742 			u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1743 
1744 			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1745 				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1746 			else
1747 				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1748 		}
1749 
1750 		if (hw->dev_spec.ich8lan.disable_k1_off)
1751 			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1752 
1753 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1754 
1755 		/* Configure K0s minimum time */
1756 		e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1757 	}
1758 
1759 	if (!link)
1760 		return E1000_SUCCESS; /* No link detected */
1761 
1762 	mac->get_link_status = false;
1763 
1764 	switch (hw->mac.type) {
1765 	case e1000_pch2lan:
1766 		ret_val = e1000_k1_workaround_lv(hw);
1767 		if (ret_val)
1768 			return ret_val;
1769 		/* FALLTHROUGH */
1770 	case e1000_pchlan:
1771 		if (hw->phy.type == e1000_phy_82578) {
1772 			ret_val = e1000_link_stall_workaround_hv(hw);
1773 			if (ret_val)
1774 				return ret_val;
1775 		}
1776 
1777 		/* Workaround for PCHx parts in half-duplex:
1778 		 * Set the number of preambles removed from the packet
1779 		 * when it is passed from the PHY to the MAC to prevent
1780 		 * the MAC from misinterpreting the packet type.
1781 		 */
1782 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1783 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1784 
1785 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1786 		    E1000_STATUS_FD)
1787 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1788 
1789 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1790 		break;
1791 	default:
1792 		break;
1793 	}
1794 
1795 	/* Check if there was DownShift; this must be checked
1796 	 * immediately after link-up
1797 	 */
1798 	e1000_check_downshift_generic(hw);
1799 
1800 	/* Enable/Disable EEE after link up */
1801 	if (hw->phy.type > e1000_phy_82579) {
1802 		ret_val = e1000_set_eee_pchlan(hw);
1803 		if (ret_val)
1804 			return ret_val;
1805 	}
1806 
1807 	/* If we are forcing speed/duplex, then we simply return since
1808 	 * we have already determined whether we have link or not.
1809 	 */
1810 	if (!mac->autoneg)
1811 		return -E1000_ERR_CONFIG;
1812 
1813 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1814 	 * of MAC speed/duplex configuration.  So we only need to
1815 	 * configure Collision Distance in the MAC.
1816 	 */
1817 	mac->ops.config_collision_dist(hw);
1818 
1819 	/* Configure Flow Control now that Auto-Neg has completed.
1820 	 * First, we need to restore the desired flow control
1821 	 * settings because we may have had to re-autoneg with a
1822 	 * different link partner.
1823 	 */
1824 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1825 	if (ret_val)
1826 		DEBUGOUT("Error configuring flow control\n");
1827 
1828 	return ret_val;
1829 }
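/*
 * Editorial sketch (not part of the driver): the 10Mbps/half-duplex IPG
 * tuning above is a masked read-modify-write - clear the IPGT field, then
 * OR in the new transmit IPG.  A standalone model of that pattern, assuming
 * the IPGT mask covers the low bits of TIPG (0x3FF is an assumed value):
 */
#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: replace the field selected by mask with value. */
static uint32_t rmw_field32(uint32_t reg, uint32_t mask, uint32_t value)
{
	return (reg & ~mask) | (value & mask);
}

int main(void)
{
	uint32_t tipg = 0x00602008;		/* example TIPG contents */

	tipg = rmw_field32(tipg, 0x3FF, 0xFF);	/* widen IPG for 10/half */
	assert((tipg & 0x3FF) == 0xFF);
	return 0;
}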
1830 
1831 /**
1832  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1833  *  @hw: pointer to the HW structure
1834  *
1835  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1836  **/
1837 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1838 {
1839 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1840 
1841 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1842 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1843 	switch (hw->mac.type) {
1844 	case e1000_ich8lan:
1845 	case e1000_ich9lan:
1846 	case e1000_ich10lan:
1847 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1848 		break;
1849 	case e1000_pchlan:
1850 	case e1000_pch2lan:
1851 	case e1000_pch_lpt:
1852 	case e1000_pch_spt:
1853 	case e1000_pch_cnp:
1854 	case e1000_pch_tgp:
1855 	case e1000_pch_adp:
1856 	case e1000_pch_mtp:
1857 	case e1000_pch_ptp:
1858 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1859 		break;
1860 	default:
1861 		break;
1862 	}
1863 }
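/*
 * Editorial sketch (not part of the driver): the init_params hooks above are
 * vtable-style dispatch - one ops table per hardware family, bound once by
 * MAC type so the hot paths simply call through function pointers.  Minimal
 * standalone model with hypothetical names:
 */
#include <stdio.h>

struct phy_ops {
	int (*init_params)(void);
};

enum mac_type { MAC_ICH8LAN, MAC_PCHLAN };

static int init_phy_ich8(void) { puts("ich8 phy init"); return 0; }
static int init_phy_pch(void)  { puts("pch phy init");  return 0; }

static void bind_phy_ops(struct phy_ops *ops, enum mac_type type)
{
	ops->init_params = (type == MAC_ICH8LAN) ? init_phy_ich8
						 : init_phy_pch;
}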
1864 
1865 /**
1866  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1867  *  @hw: pointer to the HW structure
1868  *
1869  *  Acquires the mutex for performing NVM operations.
1870  **/
1871 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1872 {
1873 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1874 
1875 	ASSERT_CTX_LOCK_HELD(hw);
1876 
1877 	return E1000_SUCCESS;
1878 }
1879 
1880 /**
1881  *  e1000_release_nvm_ich8lan - Release NVM mutex
1882  *  @hw: pointer to the HW structure
1883  *
1884  *  Releases the mutex used while performing NVM operations.
1885  **/
1886 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1887 {
1888 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1889 
1890 	ASSERT_CTX_LOCK_HELD(hw);
1891 }
1892 
1893 /**
1894  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1895  *  @hw: pointer to the HW structure
1896  *
1897  *  Acquires the software control flag for performing PHY and select
1898  *  MAC CSR accesses.
1899  **/
1900 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1901 {
1902 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1903 	s32 ret_val = E1000_SUCCESS;
1904 
1905 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1906 
1907 	ASSERT_CTX_LOCK_HELD(hw);
1908 
1909 	while (timeout) {
1910 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1911 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1912 			break;
1913 
1914 		msec_delay_irq(1);
1915 		timeout--;
1916 	}
1917 
1918 	if (!timeout) {
1919 		DEBUGOUT("SW has already locked the resource.\n");
1920 		ret_val = -E1000_ERR_CONFIG;
1921 		goto out;
1922 	}
1923 
1924 	timeout = SW_FLAG_TIMEOUT;
1925 
1926 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1927 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1928 
1929 	while (timeout) {
1930 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1931 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1932 			break;
1933 
1934 		msec_delay_irq(1);
1935 		timeout--;
1936 	}
1937 
1938 	if (!timeout) {
1939 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1940 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1941 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1942 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1943 		ret_val = -E1000_ERR_CONFIG;
1944 		goto out;
1945 	}
1946 
1947 out:
1948 	return ret_val;
1949 }
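/*
 * Editorial sketch (not part of the driver): the acquire above is a
 * two-phase handshake - wait for the flag to be free, set it, then read it
 * back until hardware reflects the set.  A compact standalone model; the
 * register callbacks, bit position, and timeouts are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

#define SWFLAG (1u << 5)	/* stand-in for E1000_EXTCNF_CTRL_SWFLAG */

static bool acquire_swflag(uint32_t (*read_reg)(void),
			   void (*write_reg)(uint32_t),
			   void (*delay_1ms)(void))
{
	uint32_t v = 0;
	int timeout;

	/* Phase 1: wait for the current owner to release the flag. */
	for (timeout = 100; timeout; timeout--) {
		v = read_reg();
		if (!(v & SWFLAG))
			break;
		delay_1ms();
	}
	if (!timeout)
		return false;	/* someone else holds the lock */

	/* Phase 2: set the flag, then poll until hardware latches it. */
	write_reg(v | SWFLAG);
	for (timeout = 30; timeout; timeout--) {
		if (read_reg() & SWFLAG)
			return true;
		delay_1ms();
	}

	/* Firmware/hardware kept the flag - undo the attempt. */
	write_reg(read_reg() & ~SWFLAG);
	return false;
}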
1950 
1951 /**
1952  *  e1000_release_swflag_ich8lan - Release software control flag
1953  *  @hw: pointer to the HW structure
1954  *
1955  *  Releases the software control flag for performing PHY and select
1956  *  MAC CSR accesses.
1957  **/
1958 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1959 {
1960 	u32 extcnf_ctrl;
1961 
1962 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1963 
1964 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1965 
1966 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1967 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1968 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1969 	} else {
1970 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1971 	}
1972 }
1973 
1974 /**
1975  *  e1000_check_mng_mode_ich8lan - Checks management mode
1976  *  @hw: pointer to the HW structure
1977  *
1978  *  This checks if the adapter has any manageability enabled.
1979  *  This is a function pointer entry point only called by read/write
1980  *  routines for the PHY and NVM parts.
1981  **/
1982 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1983 {
1984 	u32 fwsm;
1985 
1986 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1987 
1988 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1989 
1990 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1991 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1992 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1993 }
1994 
1995 /**
1996  *  e1000_check_mng_mode_pchlan - Checks management mode
1997  *  @hw: pointer to the HW structure
1998  *
1999  *  This checks if the adapter has iAMT enabled.
2000  *  This is a function pointer entry point only called by read/write
2001  *  routines for the PHY and NVM parts.
2002  **/
2003 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
2004 {
2005 	u32 fwsm;
2006 
2007 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
2008 
2009 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
2010 
2011 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
2012 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
2013 }
2014 
2015 /**
2016  *  e1000_rar_set_pch2lan - Set receive address register
2017  *  @hw: pointer to the HW structure
2018  *  @addr: pointer to the receive address
2019  *  @index: receive address array register
2020  *
2021  *  Sets the receive address array register at index to the address passed
2022  *  in by addr.  For 82579, RAR[0] is the base address register that is to
2023  *  contain the MAC address, but RAR[1-6] are reserved for manageability (ME).
2024  *  Use SHRA[0-3] in place of those reserved for ME.
2025  **/
2026 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2027 {
2028 	u32 rar_low, rar_high;
2029 
2030 	DEBUGFUNC("e1000_rar_set_pch2lan");
2031 
2032 	/* HW expects these in little endian so we reverse the byte order
2033 	 * from network order (big endian) to little endian
2034 	 */
2035 	rar_low = ((u32) addr[0] |
2036 		   ((u32) addr[1] << 8) |
2037 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2038 
2039 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2040 
2041 	/* If the MAC address is zero, no need to set the AV bit */
2042 	if (rar_low || rar_high)
2043 		rar_high |= E1000_RAH_AV;
2044 
2045 	if (index == 0) {
2046 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2047 		E1000_WRITE_FLUSH(hw);
2048 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2049 		E1000_WRITE_FLUSH(hw);
2050 		return E1000_SUCCESS;
2051 	}
2052 
2053 	/* RAR[1-6] are owned by manageability.  Skip those and program the
2054 	 * next address into the SHRA register array.
2055 	 */
2056 	if (index < (u32) (hw->mac.rar_entry_count)) {
2057 		s32 ret_val;
2058 
2059 		ret_val = e1000_acquire_swflag_ich8lan(hw);
2060 		if (ret_val)
2061 			goto out;
2062 
2063 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2064 		E1000_WRITE_FLUSH(hw);
2065 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2066 		E1000_WRITE_FLUSH(hw);
2067 
2068 		e1000_release_swflag_ich8lan(hw);
2069 
2070 		/* verify the register updates */
2071 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2072 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2073 			return E1000_SUCCESS;
2074 
2075 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2076 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2077 	}
2078 
2079 out:
2080 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2081 	return -E1000_ERR_CONFIG;
2082 }
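/*
 * Editorial sketch (not part of the driver): the RAL/RAH packing above turns
 * a network-order (big-endian) 6-byte MAC into two little-endian dwords -
 * the first four bytes in RAL, the last two plus the "address valid" bit in
 * RAH.  Standalone model; the sample address is made up for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define RAH_AV (1u << 31)	/* "address valid", as in E1000_RAH_AV */

static void pack_rar(const uint8_t addr[6], uint32_t *ral, uint32_t *rah)
{
	*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	*rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
	if (*ral || *rah)	/* an all-zero address stays invalid */
		*rah |= RAH_AV;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };
	uint32_t ral, rah;

	pack_rar(mac, &ral, &rah);
	assert(ral == 0xAB211B00u);
	assert(rah == (0xEFCDu | RAH_AV));
	return 0;
}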
2083 
2084 /**
2085  *  e1000_rar_set_pch_lpt - Set receive address registers
2086  *  @hw: pointer to the HW structure
2087  *  @addr: pointer to the receive address
2088  *  @index: receive address array register
2089  *
2090  *  Sets the receive address register array at index to the address passed
2091  *  in by addr. For LPT, RAR[0] is the base address register that is to
2092  *  contain the MAC address. SHRA[0-10] are the shared receive address
2093  *  registers that are shared between the Host and manageability engine (ME).
2094  **/
2095 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2096 {
2097 	u32 rar_low, rar_high;
2098 	u32 wlock_mac;
2099 
2100 	DEBUGFUNC("e1000_rar_set_pch_lpt");
2101 
2102 	/* HW expects these in little endian so we reverse the byte order
2103 	 * from network order (big endian) to little endian
2104 	 */
2105 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2106 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2107 
2108 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2109 
2110 	/* If the MAC address is zero, no need to set the AV bit */
2111 	if (rar_low || rar_high)
2112 		rar_high |= E1000_RAH_AV;
2113 
2114 	if (index == 0) {
2115 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2116 		E1000_WRITE_FLUSH(hw);
2117 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2118 		E1000_WRITE_FLUSH(hw);
2119 		return E1000_SUCCESS;
2120 	}
2121 
2122 	/* The manageability engine (ME) can lock certain SHRAR registers that
2123 	 * it is using - those registers are unavailable for use.
2124 	 */
2125 	if (index < hw->mac.rar_entry_count) {
2126 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2127 			    E1000_FWSM_WLOCK_MAC_MASK;
2128 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2129 
2130 		/* Check if all SHRAR registers are locked */
2131 		if (wlock_mac == 1)
2132 			goto out;
2133 
2134 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2135 			s32 ret_val;
2136 
2137 			ret_val = e1000_acquire_swflag_ich8lan(hw);
2138 
2139 			if (ret_val)
2140 				goto out;
2141 
2142 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2143 					rar_low);
2144 			E1000_WRITE_FLUSH(hw);
2145 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2146 					rar_high);
2147 			E1000_WRITE_FLUSH(hw);
2148 
2149 			e1000_release_swflag_ich8lan(hw);
2150 
2151 			/* verify the register updates */
2152 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2153 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2154 				return E1000_SUCCESS;
2155 		}
2156 	}
2157 
2158 out:
2159 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2160 	return -E1000_ERR_CONFIG;
2161 }
2162 
2163 /**
2164  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2165  *  @hw: pointer to the HW structure
2166  *  @mc_addr_list: array of multicast addresses to program
2167  *  @mc_addr_count: number of multicast addresses to program
2168  *
2169  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2170  *  The caller must have a packed mc_addr_list of multicast addresses.
2171  **/
2172 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2173 					      u8 *mc_addr_list,
2174 					      u32 mc_addr_count)
2175 {
2176 	u16 phy_reg = 0;
2177 	int i;
2178 	s32 ret_val;
2179 
2180 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2181 
2182 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2183 
2184 	ret_val = hw->phy.ops.acquire(hw);
2185 	if (ret_val)
2186 		return;
2187 
2188 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2189 	if (ret_val)
2190 		goto release;
2191 
2192 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2193 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2194 					   (u16)(hw->mac.mta_shadow[i] &
2195 						 0xFFFF));
2196 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2197 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2198 						 0xFFFF));
2199 	}
2200 
2201 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2202 
2203 release:
2204 	hw->phy.ops.release(hw);
2205 }
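/*
 * Editorial sketch (not part of the driver): the loop above mirrors each
 * 32-bit MTA shadow word into two 16-bit PHY wakeup registers - the low
 * half at BM_MTA(i) and the high half at BM_MTA(i) + 1.  Standalone model;
 * phy_write and the register stride are hypothetical stand-ins.
 */
#include <stdint.h>

static void copy_mta_to_phy(const uint32_t *mta_shadow, int count,
			    void (*phy_write)(uint32_t reg, uint16_t val),
			    uint32_t mta_base)
{
	for (int i = 0; i < count; i++) {
		/* low 16 bits, then high 16 bits, of each shadow word */
		phy_write(mta_base + 2 * i,     (uint16_t)(mta_shadow[i] & 0xFFFF));
		phy_write(mta_base + 2 * i + 1, (uint16_t)(mta_shadow[i] >> 16));
	}
}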
2206 
2207 /**
2208  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2209  *  @hw: pointer to the HW structure
2210  *
2211  *  Checks if firmware is blocking the reset of the PHY.
2212  *  This is a function pointer entry point only called by
2213  *  reset routines.
2214  **/
2215 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2216 {
2217 	u32 fwsm;
2218 	bool blocked = false;
2219 	int i = 0;
2220 
2221 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2222 
2223 	do {
2224 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2225 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2226 			blocked = true;
2227 			msec_delay(10);
2228 			continue;
2229 		}
2230 		blocked = false;
2231 	} while (blocked && (i++ < 30));
2232 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2233 }
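/*
 * Editorial sketch (not part of the driver): the loop above only reports
 * "blocked" if firmware withholds the PHY across every poll - up to 30
 * tries 10 ms apart, roughly 300 ms.  A standalone model of that grace
 * period, with the register reader and delay as hypothetical callbacks:
 */
#include <stdbool.h>
#include <stdint.h>

static bool reset_is_blocked(uint32_t (*read_fwsm)(void),
			     void (*delay_10ms)(void),
			     uint32_t allow_bit)
{
	for (int i = 0; i < 30; i++) {
		if (read_fwsm() & allow_bit)	/* firmware permits the reset */
			return false;
		delay_10ms();
	}
	return true;	/* still blocked after ~300 ms */
}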
2234 
2235 /**
2236  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2237  *  @hw: pointer to the HW structure
2238  *
2239  *  Assumes semaphore already acquired.
2240  *
2241  **/
2242 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2243 {
2244 	u16 phy_data;
2245 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2246 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2247 		E1000_STRAP_SMT_FREQ_SHIFT;
2248 	s32 ret_val;
2249 
2250 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2251 
2252 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2253 	if (ret_val)
2254 		return ret_val;
2255 
2256 	phy_data &= ~HV_SMB_ADDR_MASK;
2257 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2258 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2259 
2260 	if (hw->phy.type == e1000_phy_i217) {
2261 		/* Restore SMBus frequency */
2262 		if (freq--) {
2263 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2264 			phy_data |= (freq & (1 << 0)) <<
2265 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2266 			phy_data |= (freq & (1 << 1)) <<
2267 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2268 		} else {
2269 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2270 		}
2271 	}
2272 
2273 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2274 }
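/*
 * Editorial sketch (not part of the driver): on i217 the two-bit SMBus
 * frequency recovered from the STRAP register is re-encoded into two
 * non-adjacent bits of HV_SMB_ADDR.  The shift values below are assumed,
 * not the real register layout; the point is how bit 0 and bit 1 of freq
 * are scattered into separate positions:
 */
#include <stdint.h>

#define FREQ_LOW_SHIFT  8	/* hypothetical HV_SMB_ADDR_FREQ_LOW_SHIFT */
#define FREQ_HIGH_SHIFT 9	/* hypothetical HV_SMB_ADDR_FREQ_HIGH_SHIFT */

static uint16_t scatter_freq_bits(uint16_t phy_data, uint32_t freq)
{
	/* bit 0 of freq lands at FREQ_LOW_SHIFT */
	phy_data |= (uint16_t)((freq & (1u << 0)) << FREQ_LOW_SHIFT);
	/* bit 1 of freq lands at FREQ_HIGH_SHIFT (hence the "- 1") */
	phy_data |= (uint16_t)((freq & (1u << 1)) << (FREQ_HIGH_SHIFT - 1));
	return phy_data;
}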
2275 
2276 /**
2277  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2278  *  @hw:   pointer to the HW structure
2279  *
2280  *  SW should configure the LCD from the NVM extended configuration region
2281  *  as a workaround for certain parts.
2282  **/
2283 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2284 {
2285 	struct e1000_phy_info *phy = &hw->phy;
2286 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2287 	s32 ret_val = E1000_SUCCESS;
2288 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2289 
2290 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2291 
2292 	/* Initialize the PHY from the NVM on ICH platforms.  This
2293 	 * is needed due to an issue where the NVM configuration is
2294 	 * not properly autoloaded after power transitions.
2295 	 * Therefore, after each PHY reset, we will load the
2296 	 * configuration data out of the NVM manually.
2297 	 */
2298 	switch (hw->mac.type) {
2299 	case e1000_ich8lan:
2300 		if (phy->type != e1000_phy_igp_3)
2301 			return ret_val;
2302 
2303 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2304 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2305 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2306 			break;
2307 		}
2308 		/* FALLTHROUGH */
2309 	case e1000_pchlan:
2310 	case e1000_pch2lan:
2311 	case e1000_pch_lpt:
2312 	case e1000_pch_spt:
2313 	case e1000_pch_cnp:
2314 	case e1000_pch_tgp:
2315 	case e1000_pch_adp:
2316 	case e1000_pch_mtp:
2317 	case e1000_pch_ptp:
2318 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2319 		break;
2320 	default:
2321 		return ret_val;
2322 	}
2323 
2324 	ret_val = hw->phy.ops.acquire(hw);
2325 	if (ret_val)
2326 		return ret_val;
2327 
2328 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2329 	if (!(data & sw_cfg_mask))
2330 		goto release;
2331 
2332 	/* Make sure HW does not configure LCD from PHY
2333 	 * extended configuration before SW configuration
2334 	 */
2335 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2336 	if ((hw->mac.type < e1000_pch2lan) &&
2337 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2338 		goto release;
2339 
2340 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2341 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2342 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2343 	if (!cnf_size)
2344 		goto release;
2345 
2346 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2347 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2348 
2349 	if (((hw->mac.type == e1000_pchlan) &&
2350 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2351 	    (hw->mac.type > e1000_pchlan)) {
2352 		/* HW configures the SMBus address and LEDs when the
2353 		 * OEM and LCD Write Enable bits are set in the NVM.
2354 		 * When both NVM bits are cleared, SW will configure
2355 		 * them instead.
2356 		 */
2357 		ret_val = e1000_write_smbus_addr(hw);
2358 		if (ret_val)
2359 			goto release;
2360 
2361 		data = E1000_READ_REG(hw, E1000_LEDCTL);
2362 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2363 							(u16)data);
2364 		if (ret_val)
2365 			goto release;
2366 	}
2367 
2368 	/* Configure LCD from extended configuration region. */
2369 
2370 	/* cnf_base_addr is in DWORD */
2371 	word_addr = (u16)(cnf_base_addr << 1);
2372 
2373 	for (i = 0; i < cnf_size; i++) {
2374 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2375 					   &reg_data);
2376 		if (ret_val)
2377 			goto release;
2378 
2379 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2380 					   1, &reg_addr);
2381 		if (ret_val)
2382 			goto release;
2383 
2384 		/* Save off the PHY page for future writes. */
2385 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2386 			phy_page = reg_data;
2387 			continue;
2388 		}
2389 
2390 		reg_addr &= PHY_REG_MASK;
2391 		reg_addr |= phy_page;
2392 
2393 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2394 						    reg_data);
2395 		if (ret_val)
2396 			goto release;
2397 	}
2398 
2399 release:
2400 	hw->phy.ops.release(hw);
2401 	return ret_val;
2402 }
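/*
 * Editorial sketch (not part of the driver): the extended configuration
 * region read above is a stream of (data, address) word pairs; a page-
 * select entry is latched and OR'd into later register addresses instead
 * of being written out.  Standalone model over an in-memory word array,
 * with hypothetical names and mask values:
 */
#include <stdint.h>

#define PAGE_SELECT_REG 0x1F	/* stand-in for IGP01E1000_PHY_PAGE_SELECT */
#define REG_MASK        0x1F	/* stand-in for PHY_REG_MASK */

static void apply_ext_cfg(const uint16_t *words, uint32_t pairs,
			  void (*phy_write)(uint32_t reg, uint16_t val))
{
	uint16_t page = 0;

	for (uint32_t i = 0; i < pairs; i++) {
		uint16_t data = words[i * 2];
		uint16_t addr = words[i * 2 + 1];

		if (addr == PAGE_SELECT_REG) {	/* save page for later writes */
			page = data;
			continue;
		}
		phy_write((uint32_t)((addr & REG_MASK) | page), data);
	}
}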
2403 
2404 /**
2405  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2406  *  @hw:   pointer to the HW structure
2407  *  @link: link up bool flag
2408  *
2409  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2410  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2411  *  If link is down, the function will restore the default K1 setting located
2412  *  in the NVM.
2413  **/
2414 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2415 {
2416 	s32 ret_val = E1000_SUCCESS;
2417 	u16 status_reg = 0;
2418 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2419 
2420 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2421 
2422 	if (hw->mac.type != e1000_pchlan)
2423 		return E1000_SUCCESS;
2424 
2425 	/* Wrap the whole flow with the sw flag */
2426 	ret_val = hw->phy.ops.acquire(hw);
2427 	if (ret_val)
2428 		return ret_val;
2429 
2430 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2431 	if (link) {
2432 		if (hw->phy.type == e1000_phy_82578) {
2433 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2434 							      &status_reg);
2435 			if (ret_val)
2436 				goto release;
2437 
2438 			status_reg &= (BM_CS_STATUS_LINK_UP |
2439 				       BM_CS_STATUS_RESOLVED |
2440 				       BM_CS_STATUS_SPEED_MASK);
2441 
2442 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2443 					   BM_CS_STATUS_RESOLVED |
2444 					   BM_CS_STATUS_SPEED_1000))
2445 				k1_enable = false;
2446 		}
2447 
2448 		if (hw->phy.type == e1000_phy_82577) {
2449 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2450 							      &status_reg);
2451 			if (ret_val)
2452 				goto release;
2453 
2454 			status_reg &= (HV_M_STATUS_LINK_UP |
2455 				       HV_M_STATUS_AUTONEG_COMPLETE |
2456 				       HV_M_STATUS_SPEED_MASK);
2457 
2458 			if (status_reg == (HV_M_STATUS_LINK_UP |
2459 					   HV_M_STATUS_AUTONEG_COMPLETE |
2460 					   HV_M_STATUS_SPEED_1000))
2461 				k1_enable = false;
2462 		}
2463 
2464 		/* Link stall fix for link up */
2465 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2466 						       0x0100);
2467 		if (ret_val)
2468 			goto release;
2469 
2470 	} else {
2471 		/* Link stall fix for link down */
2472 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2473 						       0x4100);
2474 		if (ret_val)
2475 			goto release;
2476 	}
2477 
2478 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2479 
2480 release:
2481 	hw->phy.ops.release(hw);
2482 
2483 	return ret_val;
2484 }
2485 
2486 /**
2487  *  e1000_configure_k1_ich8lan - Configure K1 power state
2488  *  @hw: pointer to the HW structure
2489  *  @k1_enable: K1 state to configure
2490  *
2491  *  Configure the K1 power state based on the provided parameter.
2492  *  Assumes semaphore already acquired.
2493  *
2494  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2495  **/
2496 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2497 {
2498 	s32 ret_val;
2499 	u32 ctrl_reg = 0;
2500 	u32 ctrl_ext = 0;
2501 	u32 reg = 0;
2502 	u16 kmrn_reg = 0;
2503 
2504 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2505 
2506 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2507 					     &kmrn_reg);
2508 	if (ret_val)
2509 		return ret_val;
2510 
2511 	if (k1_enable)
2512 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2513 	else
2514 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2515 
2516 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2517 					      kmrn_reg);
2518 	if (ret_val)
2519 		return ret_val;
2520 
2521 	usec_delay(20);
2522 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2523 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2524 
2525 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2526 	reg |= E1000_CTRL_FRCSPD;
2527 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2528 
2529 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2530 	E1000_WRITE_FLUSH(hw);
2531 	usec_delay(20);
2532 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2533 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2534 	E1000_WRITE_FLUSH(hw);
2535 	usec_delay(20);
2536 
2537 	return E1000_SUCCESS;
2538 }
2539 
2540 /**
2541  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2542  *  @hw:       pointer to the HW structure
2543  *  @d0_state: boolean if entering d0 or d3 device state
2544  *
2545  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2546  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2547  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2548  **/
2549 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2550 {
2551 	s32 ret_val = 0;
2552 	u32 mac_reg;
2553 	u16 oem_reg;
2554 
2555 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2556 
2557 	if (hw->mac.type < e1000_pchlan)
2558 		return ret_val;
2559 
2560 	ret_val = hw->phy.ops.acquire(hw);
2561 	if (ret_val)
2562 		return ret_val;
2563 
2564 	if (hw->mac.type == e1000_pchlan) {
2565 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2566 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2567 			goto release;
2568 	}
2569 
2570 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2571 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2572 		goto release;
2573 
2574 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2575 
2576 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2577 	if (ret_val)
2578 		goto release;
2579 
2580 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2581 
2582 	if (d0_state) {
2583 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2584 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2585 
2586 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2587 			oem_reg |= HV_OEM_BITS_LPLU;
2588 	} else {
2589 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2590 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2591 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2592 
2593 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2594 		    E1000_PHY_CTRL_NOND0A_LPLU))
2595 			oem_reg |= HV_OEM_BITS_LPLU;
2596 	}
2597 
2598 	/* Set Restart auto-neg to activate the bits */
2599 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2600 	    !hw->phy.ops.check_reset_block(hw))
2601 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2602 
2603 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2604 
2605 release:
2606 	hw->phy.ops.release(hw);
2607 
2608 	return ret_val;
2609 }
2610 
2611 
2612 /**
2613  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2614  *  @hw:   pointer to the HW structure
2615  **/
2616 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2617 {
2618 	s32 ret_val;
2619 	u16 data;
2620 
2621 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2622 
2623 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2624 	if (ret_val)
2625 		return ret_val;
2626 
2627 	data |= HV_KMRN_MDIO_SLOW;
2628 
2629 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2630 
2631 	return ret_val;
2632 }
2633 
2634 /**
2635  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2636  *  applied after every PHY reset.
2637  *  @hw: pointer to the HW structure
2638  **/
2639 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2640 {
2641 	s32 ret_val = E1000_SUCCESS;
2642 	u16 phy_data;
2643 
2644 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2645 
2646 	if (hw->mac.type != e1000_pchlan)
2647 		return E1000_SUCCESS;
2648 
2649 	/* Set MDIO slow mode before any other MDIO access */
2650 	if (hw->phy.type == e1000_phy_82577) {
2651 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2652 		if (ret_val)
2653 			return ret_val;
2654 	}
2655 
2656 	if (((hw->phy.type == e1000_phy_82577) &&
2657 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2658 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2659 		/* Disable generation of early preamble */
2660 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2661 		if (ret_val)
2662 			return ret_val;
2663 
2664 		/* Preamble tuning for SSC */
2665 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2666 						0xA204);
2667 		if (ret_val)
2668 			return ret_val;
2669 	}
2670 
2671 	if (hw->phy.type == e1000_phy_82578) {
2672 		/* Return registers to default by doing a soft reset then
2673 		 * writing 0x3140 to the control register.
2674 		 */
2675 		if (hw->phy.revision < 2) {
2676 			e1000_phy_sw_reset_generic(hw);
2677 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2678 							0x3140);
2679 			if (ret_val)
2680 				return ret_val;
2681 		}
2682 	}
2683 
2684 	/* Select page 0 */
2685 	ret_val = hw->phy.ops.acquire(hw);
2686 	if (ret_val)
2687 		return ret_val;
2688 
2689 	hw->phy.addr = 1;
2690 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2691 	hw->phy.ops.release(hw);
2692 	if (ret_val)
2693 		return ret_val;
2694 
2695 	/* Configure the K1 Si workaround during phy reset assuming there is
2696 	 * link so that it disables K1 if link is at 1Gbps.
2697 	 */
2698 	ret_val = e1000_k1_gig_workaround_hv(hw, true);
2699 	if (ret_val)
2700 		return ret_val;
2701 
2702 	/* Workaround for link disconnects on a busy hub in half duplex */
2703 	ret_val = hw->phy.ops.acquire(hw);
2704 	if (ret_val)
2705 		return ret_val;
2706 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2707 	if (ret_val)
2708 		goto release;
2709 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2710 					       phy_data & 0x00FF);
2711 	if (ret_val)
2712 		goto release;
2713 
2714 	/* set MSE higher to enable link to stay up when noise is high */
2715 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2716 release:
2717 	hw->phy.ops.release(hw);
2718 
2719 	return ret_val;
2720 }
2721 
2722 /**
2723  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2724  *  @hw:   pointer to the HW structure
2725  **/
2726 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2727 {
2728 	u32 mac_reg;
2729 	u16 i, phy_reg = 0;
2730 	s32 ret_val;
2731 
2732 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2733 
2734 	ret_val = hw->phy.ops.acquire(hw);
2735 	if (ret_val)
2736 		return;
2737 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2738 	if (ret_val)
2739 		goto release;
2740 
2741 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2742 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2743 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2744 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2745 					   (u16)(mac_reg & 0xFFFF));
2746 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2747 					   (u16)((mac_reg >> 16) & 0xFFFF));
2748 
2749 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2750 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2751 					   (u16)(mac_reg & 0xFFFF));
2752 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2753 					   (u16)((mac_reg & E1000_RAH_AV)
2754 						 >> 16));
2755 	}
2756 
2757 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2758 
2759 release:
2760 	hw->phy.ops.release(hw);
2761 }
2762 
2763 static u32 e1000_calc_rx_da_crc(u8 mac[])
2764 {
2765 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2766 	u32 i, j, mask, crc;
2767 
2768 	DEBUGFUNC("e1000_calc_rx_da_crc");
2769 
2770 	crc = 0xffffffff;
2771 	for (i = 0; i < 6; i++) {
2772 		crc = crc ^ mac[i];
2773 		for (j = 8; j > 0; j--) {
2774 			mask = (crc & 1) * (-1);
2775 			crc = (crc >> 1) ^ (poly & mask);
2776 		}
2777 	}
2778 	return ~crc;
2779 }
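/*
 * Editorial sketch (not part of the driver): in the loop above,
 * mask = (crc & 1) * (-1) builds an all-ones mask whenever the CRC's low
 * bit is set, so the polynomial is XOR'd in branch-free.  The explicit
 * branch below is equivalent and is the textbook reflected CRC-32 inner
 * loop over an arbitrary buffer:
 */
#include <stdint.h>

static uint32_t crc32_reflected(const uint8_t *buf, int len)
{
	uint32_t crc = 0xFFFFFFFFu;

	for (int i = 0; i < len; i++) {
		crc ^= buf[i];
		for (int j = 0; j < 8; j++) {
			if (crc & 1)	/* same effect as the mask trick */
				crc = (crc >> 1) ^ 0xEDB88320u;	/* 802.3 poly */
			else
				crc >>= 1;
		}
	}
	return ~crc;
}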
2780 
2781 /**
2782  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2783  *  with 82579 PHY
2784  *  @hw: pointer to the HW structure
2785  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2786  **/
2787 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2788 {
2789 	s32 ret_val = E1000_SUCCESS;
2790 	u16 phy_reg, data;
2791 	u32 mac_reg;
2792 	u16 i;
2793 
2794 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2795 
2796 	if (hw->mac.type < e1000_pch2lan)
2797 		return E1000_SUCCESS;
2798 
2799 	/* disable Rx path while enabling/disabling workaround */
2800 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2801 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2802 					phy_reg | (1 << 14));
2803 	if (ret_val)
2804 		return ret_val;
2805 
2806 	if (enable) {
2807 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2808 		 * SHRAL/H) and initial CRC values to the MAC
2809 		 */
2810 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2811 			u8 mac_addr[ETHER_ADDR_LEN] = {0};
2812 			u32 addr_high, addr_low;
2813 
2814 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2815 			if (!(addr_high & E1000_RAH_AV))
2816 				continue;
2817 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2818 			mac_addr[0] = (addr_low & 0xFF);
2819 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2820 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2821 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2822 			mac_addr[4] = (addr_high & 0xFF);
2823 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2824 
2825 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2826 					e1000_calc_rx_da_crc(mac_addr));
2827 		}
2828 
2829 		/* Write Rx addresses to the PHY */
2830 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2831 
2832 		/* Enable jumbo frame workaround in the MAC */
2833 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2834 		mac_reg &= ~(1 << 14);
2835 		mac_reg |= (7 << 15);
2836 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2837 
2838 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2839 		mac_reg |= E1000_RCTL_SECRC;
2840 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2841 
2842 		ret_val = e1000_read_kmrn_reg_generic(hw,
2843 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2844 						&data);
2845 		if (ret_val)
2846 			return ret_val;
2847 		ret_val = e1000_write_kmrn_reg_generic(hw,
2848 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2849 						data | (1 << 0));
2850 		if (ret_val)
2851 			return ret_val;
2852 		ret_val = e1000_read_kmrn_reg_generic(hw,
2853 						E1000_KMRNCTRLSTA_HD_CTRL,
2854 						&data);
2855 		if (ret_val)
2856 			return ret_val;
2857 		data &= ~(0xF << 8);
2858 		data |= (0xB << 8);
2859 		ret_val = e1000_write_kmrn_reg_generic(hw,
2860 						E1000_KMRNCTRLSTA_HD_CTRL,
2861 						data);
2862 		if (ret_val)
2863 			return ret_val;
2864 
2865 		/* Enable jumbo frame workaround in the PHY */
2866 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2867 		data &= ~(0x7F << 5);
2868 		data |= (0x37 << 5);
2869 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2870 		if (ret_val)
2871 			return ret_val;
2872 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2873 		data &= ~(1 << 13);
2874 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2875 		if (ret_val)
2876 			return ret_val;
2877 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2878 		data &= ~(0x3FF << 2);
2879 		data |= (E1000_TX_PTR_GAP << 2);
2880 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2881 		if (ret_val)
2882 			return ret_val;
2883 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2884 		if (ret_val)
2885 			return ret_val;
2886 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2887 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2888 						(1 << 10));
2889 		if (ret_val)
2890 			return ret_val;
2891 	} else {
2892 		/* Write MAC register values back to h/w defaults */
2893 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2894 		mac_reg &= ~(0xF << 14);
2895 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2896 
2897 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2898 		mac_reg &= ~E1000_RCTL_SECRC;
2899 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2900 
2901 		ret_val = e1000_read_kmrn_reg_generic(hw,
2902 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2903 						&data);
2904 		if (ret_val)
2905 			return ret_val;
2906 		ret_val = e1000_write_kmrn_reg_generic(hw,
2907 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2908 						data & ~(1 << 0));
2909 		if (ret_val)
2910 			return ret_val;
2911 		ret_val = e1000_read_kmrn_reg_generic(hw,
2912 						E1000_KMRNCTRLSTA_HD_CTRL,
2913 						&data);
2914 		if (ret_val)
2915 			return ret_val;
2916 		data &= ~(0xF << 8);
2917 		data |= (0xB << 8);
2918 		ret_val = e1000_write_kmrn_reg_generic(hw,
2919 						E1000_KMRNCTRLSTA_HD_CTRL,
2920 						data);
2921 		if (ret_val)
2922 			return ret_val;
2923 
2924 		/* Write PHY register values back to h/w defaults */
2925 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2926 		data &= ~(0x7F << 5);
2927 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2928 		if (ret_val)
2929 			return ret_val;
2930 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2931 		data |= (1 << 13);
2932 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2933 		if (ret_val)
2934 			return ret_val;
2935 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2936 		data &= ~(0x3FF << 2);
2937 		data |= (0x8 << 2);
2938 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2939 		if (ret_val)
2940 			return ret_val;
2941 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2942 		if (ret_val)
2943 			return ret_val;
2944 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2945 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2946 						~(1 << 10));
2947 		if (ret_val)
2948 			return ret_val;
2949 	}
2950 
2951 	/* re-enable Rx path after enabling/disabling workaround */
2952 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2953 				     ~(1 << 14));
2954 }
2955 
2956 /**
2957  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2958  *  applied after every PHY reset.
2959  *  @hw: pointer to the HW structure
2960  **/
2961 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2962 {
2963 	s32 ret_val = E1000_SUCCESS;
2964 
2965 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2966 
2967 	if (hw->mac.type != e1000_pch2lan)
2968 		return E1000_SUCCESS;
2969 
2970 	/* Set MDIO slow mode before any other MDIO access */
2971 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2972 	if (ret_val)
2973 		return ret_val;
2974 
2975 	ret_val = hw->phy.ops.acquire(hw);
2976 	if (ret_val)
2977 		return ret_val;
2978 	/* set MSE higher to enable link to stay up when noise is high */
2979 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2980 	if (ret_val)
2981 		goto release;
2982 	/* drop link after 5 times MSE threshold was reached */
2983 	/* drop the link after the MSE threshold was exceeded 5 times */
2984 release:
2985 	hw->phy.ops.release(hw);
2986 
2987 	return ret_val;
2988 }
2989 
2990 /**
2991  *  e1000_k1_workaround_lv - K1 Si workaround
2992  *  @hw:   pointer to the HW structure
2993  *
2994  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2995  *  Disables K1 for the 1000 and 100 speeds.
2996  **/
2997 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2998 {
2999 	s32 ret_val = E1000_SUCCESS;
3000 	u16 status_reg = 0;
3001 
3002 	DEBUGFUNC("e1000_k1_workaround_lv");
3003 
3004 	if (hw->mac.type != e1000_pch2lan)
3005 		return E1000_SUCCESS;
3006 
3007 	/* Set K1 beacon duration based on 10Mbps speed */
3008 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
3009 	if (ret_val)
3010 		return ret_val;
3011 
3012 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
3013 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
3014 		if (status_reg &
3015 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
3016 			u16 pm_phy_reg;
3017 
3018 			/* LV 1G/100 packet drop issue workaround */
3019 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
3020 						       &pm_phy_reg);
3021 			if (ret_val)
3022 				return ret_val;
3023 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
3024 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
3025 							pm_phy_reg);
3026 			if (ret_val)
3027 				return ret_val;
3028 		} else {
3029 			u32 mac_reg;
3030 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3031 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3032 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3033 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3034 		}
3035 	}
3036 
3037 	return ret_val;
3038 }
3039 
3040 /**
3041  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3042  *  @hw:   pointer to the HW structure
3043  *  @gate: boolean set to true to gate, false to ungate
3044  *
3045  *  Gate/ungate the automatic PHY configuration via hardware; perform
3046  *  the configuration via software instead.
3047  **/
3048 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3049 {
3050 	u32 extcnf_ctrl;
3051 
3052 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3053 
3054 	if (hw->mac.type < e1000_pch2lan)
3055 		return;
3056 
3057 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3058 
3059 	if (gate)
3060 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3061 	else
3062 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3063 
3064 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3065 }
3066 
3067 /**
3068  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3069  *  @hw: pointer to the HW structure
3070  *
3071  *  Check the appropriate indication that the MAC has finished configuring the
3072  *  PHY after a software reset.
3073  **/
3074 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3075 {
3076 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3077 
3078 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
3079 
3080 	/* Wait for basic configuration to complete before proceeding */
3081 	do {
3082 		data = E1000_READ_REG(hw, E1000_STATUS);
3083 		data &= E1000_STATUS_LAN_INIT_DONE;
3084 		usec_delay(100);
3085 	} while ((!data) && --loop);
3086 
3087 	/* If basic configuration did not complete before the loop
3088 	 * count above reached 0, loading the configuration from NVM will
3089 	 * leave the PHY in a bad state, possibly resulting in no link.
3090 	 */
3091 	if (loop == 0)
3092 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3093 
3094 	/* Clear the Init Done bit for the next init event */
3095 	data = E1000_READ_REG(hw, E1000_STATUS);
3096 	data &= ~E1000_STATUS_LAN_INIT_DONE;
3097 	E1000_WRITE_REG(hw, E1000_STATUS, data);
3098 }
3099 
3100 /**
3101  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3102  *  @hw: pointer to the HW structure
3103  **/
3104 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3105 {
3106 	s32 ret_val = E1000_SUCCESS;
3107 	u16 reg;
3108 
3109 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3110 
3111 	if (hw->phy.ops.check_reset_block(hw))
3112 		return E1000_SUCCESS;
3113 
3114 	/* Allow time for h/w to get to quiescent state after reset */
3115 	msec_delay(10);
3116 
3117 	/* Perform any necessary post-reset workarounds */
3118 	switch (hw->mac.type) {
3119 	case e1000_pchlan:
3120 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3121 		if (ret_val)
3122 			return ret_val;
3123 		break;
3124 	case e1000_pch2lan:
3125 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3126 		if (ret_val)
3127 			return ret_val;
3128 		break;
3129 	default:
3130 		break;
3131 	}
3132 
3133 	/* Clear the host wakeup bit after lcd reset */
3134 	if (hw->mac.type >= e1000_pchlan) {
3135 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3136 		reg &= ~BM_WUC_HOST_WU_BIT;
3137 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3138 	}
3139 
3140 	/* Configure the LCD with the extended configuration region in NVM */
3141 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3142 	if (ret_val)
3143 		return ret_val;
3144 
3145 	/* Configure the LCD with the OEM bits in NVM */
3146 	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
3147 
3148 	if (hw->mac.type == e1000_pch2lan) {
3149 		/* Ungate automatic PHY configuration on non-managed 82579 */
3150 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3151 		    E1000_ICH_FWSM_FW_VALID)) {
3152 			msec_delay(10);
3153 			e1000_gate_hw_phy_config_ich8lan(hw, false);
3154 		}
3155 
3156 		/* Set EEE LPI Update Timer to 200usec */
3157 		ret_val = hw->phy.ops.acquire(hw);
3158 		if (ret_val)
3159 			return ret_val;
3160 		ret_val = e1000_write_emi_reg_locked(hw,
3161 						     I82579_LPI_UPDATE_TIMER,
3162 						     0x1387);
3163 		hw->phy.ops.release(hw);
3164 	}
3165 
3166 	return ret_val;
3167 }
3168 
3169 /**
3170  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3171  *  @hw: pointer to the HW structure
3172  *
3173  *  Resets the PHY
3174  *  This is a function pointer entry point called by drivers
3175  *  or other shared routines.
3176  **/
3177 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3178 {
3179 	s32 ret_val = E1000_SUCCESS;
3180 
3181 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3182 
3183 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3184 	if ((hw->mac.type == e1000_pch2lan) &&
3185 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3186 		e1000_gate_hw_phy_config_ich8lan(hw, true);
3187 
3188 	ret_val = e1000_phy_hw_reset_generic(hw);
3189 	if (ret_val)
3190 		return ret_val;
3191 
3192 	return e1000_post_phy_reset_ich8lan(hw);
3193 }
3194 
3195 /**
3196  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3197  *  @hw: pointer to the HW structure
3198  *  @active: true to enable LPLU, false to disable
3199  *
3200  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM write
3201  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
3202  *  the phy speed. This function will manually set the LPLU bit and restart
3203  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3204  *  since it configures the same bit.
3205  **/
3206 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3207 {
3208 	s32 ret_val;
3209 	u16 oem_reg;
3210 
3211 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3212 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3213 	if (ret_val)
3214 		return ret_val;
3215 
3216 	if (active)
3217 		oem_reg |= HV_OEM_BITS_LPLU;
3218 	else
3219 		oem_reg &= ~HV_OEM_BITS_LPLU;
3220 
3221 	if (!hw->phy.ops.check_reset_block(hw))
3222 		oem_reg |= HV_OEM_BITS_RESTART_AN;
3223 
3224 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3225 }
3226 
3227 /**
3228  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3229  *  @hw: pointer to the HW structure
3230  *  @active: true to enable LPLU, false to disable
3231  *
3232  *  Sets the LPLU D0 state according to the active flag.  When
3233  *  activating LPLU this function also disables smart speed
3234  *  and vice versa.  LPLU will not be activated unless the
3235  *  device autonegotiation advertisement meets standards of
3236  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3237  *  This is a function pointer entry point only called by
3238  *  PHY setup routines.
3239  **/
3240 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3241 {
3242 	struct e1000_phy_info *phy = &hw->phy;
3243 	u32 phy_ctrl;
3244 	s32 ret_val = E1000_SUCCESS;
3245 	u16 data;
3246 
3247 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3248 
3249 	if (phy->type == e1000_phy_ife)
3250 		return E1000_SUCCESS;
3251 
3252 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3253 
3254 	if (active) {
3255 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3256 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3257 
3258 		if (phy->type != e1000_phy_igp_3)
3259 			return E1000_SUCCESS;
3260 
3261 		/* Call gig speed drop workaround on LPLU before accessing
3262 		 * any PHY registers
3263 		 */
3264 		if (hw->mac.type == e1000_ich8lan)
3265 			e1000_gig_downshift_workaround_ich8lan(hw);
3266 
3267 		/* When LPLU is enabled, we should disable SmartSpeed */
3268 		ret_val = phy->ops.read_reg(hw,
3269 					    IGP01E1000_PHY_PORT_CONFIG,
3270 					    &data);
3271 		if (ret_val)
3272 			return ret_val;
3273 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3274 		ret_val = phy->ops.write_reg(hw,
3275 					     IGP01E1000_PHY_PORT_CONFIG,
3276 					     data);
3277 		if (ret_val)
3278 			return ret_val;
3279 	} else {
3280 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3281 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3282 
3283 		if (phy->type != e1000_phy_igp_3)
3284 			return E1000_SUCCESS;
3285 
3286 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3287 		 * during Dx states where the power conservation is most
3288 		 * important.  During driver activity we should enable
3289 		 * SmartSpeed, so performance is maintained.
3290 		 */
3291 		if (phy->smart_speed == e1000_smart_speed_on) {
3292 			ret_val = phy->ops.read_reg(hw,
3293 						    IGP01E1000_PHY_PORT_CONFIG,
3294 						    &data);
3295 			if (ret_val)
3296 				return ret_val;
3297 
3298 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3299 			ret_val = phy->ops.write_reg(hw,
3300 						     IGP01E1000_PHY_PORT_CONFIG,
3301 						     data);
3302 			if (ret_val)
3303 				return ret_val;
3304 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3305 			ret_val = phy->ops.read_reg(hw,
3306 						    IGP01E1000_PHY_PORT_CONFIG,
3307 						    &data);
3308 			if (ret_val)
3309 				return ret_val;
3310 
3311 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3312 			ret_val = phy->ops.write_reg(hw,
3313 						     IGP01E1000_PHY_PORT_CONFIG,
3314 						     data);
3315 			if (ret_val)
3316 				return ret_val;
3317 		}
3318 	}
3319 
3320 	return E1000_SUCCESS;
3321 }
3322 
3323 /**
3324  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3325  *  @hw: pointer to the HW structure
3326  *  @active: true to enable LPLU, false to disable
3327  *
3328  *  Sets the LPLU D3 state according to the active flag.  When
3329  *  activating LPLU this function also disables smart speed
3330  *  and vice versa.  LPLU will not be activated unless the
3331  *  device autonegotiation advertisement meets standards of
3332  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3333  *  This is a function pointer entry point only called by
3334  *  PHY setup routines.
3335  **/
3336 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3337 {
3338 	struct e1000_phy_info *phy = &hw->phy;
3339 	u32 phy_ctrl;
3340 	s32 ret_val = E1000_SUCCESS;
3341 	u16 data;
3342 
3343 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3344 
3345 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3346 
3347 	if (!active) {
3348 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3349 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3350 
3351 		if (phy->type != e1000_phy_igp_3)
3352 			return E1000_SUCCESS;
3353 
3354 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3355 		 * during Dx states where the power conservation is most
3356 		 * important.  During driver activity we should enable
3357 		 * SmartSpeed, so performance is maintained.
3358 		 */
3359 		if (phy->smart_speed == e1000_smart_speed_on) {
3360 			ret_val = phy->ops.read_reg(hw,
3361 						    IGP01E1000_PHY_PORT_CONFIG,
3362 						    &data);
3363 			if (ret_val)
3364 				return ret_val;
3365 
3366 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3367 			ret_val = phy->ops.write_reg(hw,
3368 						     IGP01E1000_PHY_PORT_CONFIG,
3369 						     data);
3370 			if (ret_val)
3371 				return ret_val;
3372 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3373 			ret_val = phy->ops.read_reg(hw,
3374 						    IGP01E1000_PHY_PORT_CONFIG,
3375 						    &data);
3376 			if (ret_val)
3377 				return ret_val;
3378 
3379 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3380 			ret_val = phy->ops.write_reg(hw,
3381 						     IGP01E1000_PHY_PORT_CONFIG,
3382 						     data);
3383 			if (ret_val)
3384 				return ret_val;
3385 		}
3386 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3387 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3388 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3389 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3390 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3391 
3392 		if (phy->type != e1000_phy_igp_3)
3393 			return E1000_SUCCESS;
3394 
3395 		/* Call gig speed drop workaround on LPLU before accessing
3396 		 * any PHY registers
3397 		 */
3398 		if (hw->mac.type == e1000_ich8lan)
3399 			e1000_gig_downshift_workaround_ich8lan(hw);
3400 
3401 		/* When LPLU is enabled, we should disable SmartSpeed */
3402 		ret_val = phy->ops.read_reg(hw,
3403 					    IGP01E1000_PHY_PORT_CONFIG,
3404 					    &data);
3405 		if (ret_val)
3406 			return ret_val;
3407 
3408 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3409 		ret_val = phy->ops.write_reg(hw,
3410 					     IGP01E1000_PHY_PORT_CONFIG,
3411 					     data);
3412 	}
3413 
3414 	return ret_val;
3415 }
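
/*
 * Illustrative sketch (compiled out): D3 LPLU is only engaged above when
 * the advertised autonegotiation speeds include 10 Mb/s at all duplexes,
 * i.e. one of the three masks checked in e1000_set_d3_lplu_state_ich8lan.
 * The helper name is hypothetical.
 */
#if 0
static bool e1000_example_d3_lplu_allowed(u16 autoneg_advertised)
{
	return (autoneg_advertised == E1000_ALL_SPEED_DUPLEX ||
		autoneg_advertised == E1000_ALL_NOT_GIG ||
		autoneg_advertised == E1000_ALL_10_SPEED);
}
#endif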
3416 
3417 /**
3418  *  e1000_valid_nvm_bank_detect_ich8lan - detect the valid NVM bank (0 or 1)
3419  *  @hw: pointer to the HW structure
3420  *  @bank:  pointer to the variable that returns the active bank
3421  *
3422  *  Reads signature byte from the NVM using the flash access registers.
3423  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3424  **/
3425 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3426 {
3427 	u32 eecd;
3428 	struct e1000_nvm_info *nvm = &hw->nvm;
3429 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3430 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3431 	u32 nvm_dword = 0;
3432 	u8 sig_byte = 0;
3433 	s32 ret_val;
3434 
3435 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3436 
3437 	switch (hw->mac.type) {
3438 	case e1000_pch_spt:
3439 	case e1000_pch_cnp:
3440 	case e1000_pch_tgp:
3441 	case e1000_pch_adp:
3442 	case e1000_pch_mtp:
3443 	case e1000_pch_ptp:
3444 		bank1_offset = nvm->flash_bank_size;
3445 		act_offset = E1000_ICH_NVM_SIG_WORD;
3446 
3447 		/* set bank to 0 in case flash read fails */
3448 		*bank = 0;
3449 
3450 		/* Check bank 0 */
3451 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3452 							 &nvm_dword);
3453 		if (ret_val)
3454 			return ret_val;
3455 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3456 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3457 		    E1000_ICH_NVM_SIG_VALUE) {
3458 			*bank = 0;
3459 			return E1000_SUCCESS;
3460 		}
3461 
3462 		/* Check bank 1 */
3463 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3464 							 bank1_offset,
3465 							 &nvm_dword);
3466 		if (ret_val)
3467 			return ret_val;
3468 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3469 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3470 		    E1000_ICH_NVM_SIG_VALUE) {
3471 			*bank = 1;
3472 			return E1000_SUCCESS;
3473 		}
3474 
3475 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3476 		return -E1000_ERR_NVM;
3477 	case e1000_ich8lan:
3478 	case e1000_ich9lan:
3479 		eecd = E1000_READ_REG(hw, E1000_EECD);
3480 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3481 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3482 			if (eecd & E1000_EECD_SEC1VAL)
3483 				*bank = 1;
3484 			else
3485 				*bank = 0;
3486 
3487 			return E1000_SUCCESS;
3488 		}
3489 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3490 		/* FALLTHROUGH */
3491 	default:
3492 		/* set bank to 0 in case flash read fails */
3493 		*bank = 0;
3494 
3495 		/* Check bank 0 */
3496 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3497 							&sig_byte);
3498 		if (ret_val)
3499 			return ret_val;
3500 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3501 		    E1000_ICH_NVM_SIG_VALUE) {
3502 			*bank = 0;
3503 			return E1000_SUCCESS;
3504 		}
3505 
3506 		/* Check bank 1 */
3507 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3508 							bank1_offset,
3509 							&sig_byte);
3510 		if (ret_val)
3511 			return ret_val;
3512 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3513 		    E1000_ICH_NVM_SIG_VALUE) {
3514 			*bank = 1;
3515 			return E1000_SUCCESS;
3516 		}
3517 
3518 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3519 		return -E1000_ERR_NVM;
3520 	}
3521 }
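
/*
 * Illustrative sketch (compiled out): the signature test applied above.
 * A bank is valid when bits 15:14 of NVM word 0x13 read 10b; only the
 * high byte of that word is examined.  The helper name is hypothetical.
 */
#if 0
static bool e1000_example_bank_sig_valid(u8 sig_byte)
{
	return (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
	       E1000_ICH_NVM_SIG_VALUE;
}
#endif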
3522 
3523 /**
3524  *  e1000_read_nvm_spt - NVM access for SPT
3525  *  @hw: pointer to the HW structure
3526  *  @offset: The offset (in words) of the word(s) to read.
3527  *  @words: Size of data to read in words.
3528  *  @data: pointer to the word(s) to read at offset.
3529  *
3530  *  Reads one or more words from the NVM using the flash access registers.
3531  **/
3532 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3533 			      u16 *data)
3534 {
3535 	struct e1000_nvm_info *nvm = &hw->nvm;
3536 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3537 	u32 act_offset;
3538 	s32 ret_val = E1000_SUCCESS;
3539 	u32 bank = 0;
3540 	u32 dword = 0;
3541 	u16 offset_to_read;
3542 	u16 i;
3543 
3544 	DEBUGFUNC("e1000_read_nvm_spt");
3545 
3546 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3547 	    (words == 0)) {
3548 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3549 		ret_val = -E1000_ERR_NVM;
3550 		goto out;
3551 	}
3552 
3553 	nvm->ops.acquire(hw);
3554 
3555 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3556 	if (ret_val != E1000_SUCCESS) {
3557 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3558 		bank = 0;
3559 	}
3560 
3561 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3562 	act_offset += offset;
3563 
3564 	ret_val = E1000_SUCCESS;
3565 
3566 	for (i = 0; i < words; i += 2) {
3567 		if (words - i == 1) {
3568 			if (dev_spec->shadow_ram[offset + i].modified) {
3569 				data[i] =
3570 				    dev_spec->shadow_ram[offset + i].value;
3571 			} else {
3572 				offset_to_read = act_offset + i -
3573 						 ((act_offset + i) % 2);
3574 				ret_val =
3575 				   e1000_read_flash_dword_ich8lan(hw,
3576 								 offset_to_read,
3577 								 &dword);
3578 				if (ret_val)
3579 					break;
3580 				if ((act_offset + i) % 2 == 0)
3581 					data[i] = (u16)(dword & 0xFFFF);
3582 				else
3583 					data[i] = (u16)((dword >> 16) & 0xFFFF);
3584 			}
3585 		} else {
3586 			offset_to_read = act_offset + i;
3587 			if (!(dev_spec->shadow_ram[offset + i].modified) ||
3588 			    !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3589 				ret_val =
3590 				   e1000_read_flash_dword_ich8lan(hw,
3591 								 offset_to_read,
3592 								 &dword);
3593 				if (ret_val)
3594 					break;
3595 			}
3596 			if (dev_spec->shadow_ram[offset + i].modified)
3597 				data[i] =
3598 				    dev_spec->shadow_ram[offset + i].value;
3599 			else
3600 				data[i] = (u16)(dword & 0xFFFF);
3601 			if (dev_spec->shadow_ram[offset + i + 1].modified)
3602 				data[i + 1] =
3603 				   dev_spec->shadow_ram[offset + i + 1].value;
3604 			else
3605 				data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3606 		}
3607 	}
3608 
3609 	nvm->ops.release(hw);
3610 
3611 out:
3612 	if (ret_val)
3613 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3614 
3615 	return ret_val;
3616 }
3617 
3618 /**
3619  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3620  *  @hw: pointer to the HW structure
3621  *  @offset: The offset (in words) of the word(s) to read.
3622  *  @words: Size of data to read in words
3623  *  @data: Pointer to the word(s) to read at offset.
3624  *
3625  *  Reads one or more words from the NVM using the flash access registers.
3626  **/
3627 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3628 				  u16 *data)
3629 {
3630 	struct e1000_nvm_info *nvm = &hw->nvm;
3631 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3632 	u32 act_offset;
3633 	s32 ret_val = E1000_SUCCESS;
3634 	u32 bank = 0;
3635 	u16 i, word;
3636 
3637 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3638 
3639 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3640 	    (words == 0)) {
3641 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3642 		ret_val = -E1000_ERR_NVM;
3643 		goto out;
3644 	}
3645 
3646 	nvm->ops.acquire(hw);
3647 
3648 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3649 	if (ret_val != E1000_SUCCESS) {
3650 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3651 		bank = 0;
3652 	}
3653 
3654 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3655 	act_offset += offset;
3656 
3657 	ret_val = E1000_SUCCESS;
3658 	for (i = 0; i < words; i++) {
3659 		if (dev_spec->shadow_ram[offset + i].modified) {
3660 			data[i] = dev_spec->shadow_ram[offset + i].value;
3661 		} else {
3662 			ret_val = e1000_read_flash_word_ich8lan(hw,
3663 								act_offset + i,
3664 								&word);
3665 			if (ret_val)
3666 				break;
3667 			data[i] = word;
3668 		}
3669 	}
3670 
3671 	nvm->ops.release(hw);
3672 
3673 out:
3674 	if (ret_val)
3675 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3676 
3677 	return ret_val;
3678 }
3679 
3680 /**
3681  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3682  *  @hw: pointer to the HW structure
3683  *
3684  *  This function does initial flash setup so that a new read/write/erase cycle
3685  *  can be started.
3686  **/
3687 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3688 {
3689 	union ich8_hws_flash_status hsfsts;
3690 	s32 ret_val = -E1000_ERR_NVM;
3691 
3692 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3693 
3694 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3695 
3696 	/* Check if the flash descriptor is valid */
3697 	if (!hsfsts.hsf_status.fldesvalid) {
3698 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3699 		return -E1000_ERR_NVM;
3700 	}
3701 
3702 	/* Clear FCERR and DAEL in hw status by writing 1 */
3703 	hsfsts.hsf_status.flcerr = 1;
3704 	hsfsts.hsf_status.dael = 1;
3705 	if (hw->mac.type >= e1000_pch_spt)
3706 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3707 				      hsfsts.regval & 0xFFFF);
3708 	else
3709 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3710 
3711 	/* Either a hardware SPI cycle-in-progress bit must be
3712 	 * available to check against before starting a new cycle,
3713 	 * or the FDONE bit must be set by hardware after reset
3714 	 * so that it is 1, which can then be used as an
3715 	 * indication of whether a cycle is in progress or has
3716 	 * completed.
3717 	 */
3718 
3719 	if (!hsfsts.hsf_status.flcinprog) {
3720 		/* There is no cycle running at present,
3721 		 * so we can start a cycle.
3722 		 * Begin by setting Flash Cycle Done.
3723 		 */
3724 		hsfsts.hsf_status.flcdone = 1;
3725 		if (hw->mac.type >= e1000_pch_spt)
3726 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3727 					      hsfsts.regval & 0xFFFF);
3728 		else
3729 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3730 						hsfsts.regval);
3731 		ret_val = E1000_SUCCESS;
3732 	} else {
3733 		s32 i;
3734 
3735 		/* Otherwise poll for sometime so the current
3736 		 * cycle has a chance to end before giving up.
3737 		 */
3738 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3739 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3740 							      ICH_FLASH_HSFSTS);
3741 			if (!hsfsts.hsf_status.flcinprog) {
3742 				ret_val = E1000_SUCCESS;
3743 				break;
3744 			}
3745 			usec_delay(1);
3746 		}
3747 		if (ret_val == E1000_SUCCESS) {
3748 			/* The previous cycle completed before we timed
3749 			 * out, so now set the Flash Cycle Done.
3750 			 */
3751 			hsfsts.hsf_status.flcdone = 1;
3752 			if (hw->mac.type >= e1000_pch_spt)
3753 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3754 						      hsfsts.regval & 0xFFFF);
3755 			else
3756 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3757 							hsfsts.regval);
3758 		} else {
3759 			DEBUGOUT("Flash controller busy, cannot get access\n");
3760 		}
3761 	}
3762 
3763 	return ret_val;
3764 }
3765 
3766 /**
3767  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3768  *  @hw: pointer to the HW structure
3769  *  @timeout: maximum time to wait for completion
3770  *
3771  *  This function starts a flash cycle and waits for its completion.
3772  **/
3773 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3774 {
3775 	union ich8_hws_flash_ctrl hsflctl;
3776 	union ich8_hws_flash_status hsfsts;
3777 	u32 i = 0;
3778 
3779 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3780 
3781 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3782 	if (hw->mac.type >= e1000_pch_spt)
3783 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3784 	else
3785 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3786 	hsflctl.hsf_ctrl.flcgo = 1;
3787 
3788 	if (hw->mac.type >= e1000_pch_spt)
3789 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3790 				      hsflctl.regval << 16);
3791 	else
3792 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3793 
3794 	/* wait till FDONE bit is set to 1 */
3795 	do {
3796 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3797 		if (hsfsts.hsf_status.flcdone)
3798 			break;
3799 		usec_delay(1);
3800 	} while (i++ < timeout);
3801 
3802 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3803 		return E1000_SUCCESS;
3804 
3805 	return -E1000_ERR_NVM;
3806 }
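
/*
 * Illustrative sketch (compiled out): the bounded one-microsecond poll
 * used by e1000_flash_cycle_ich8lan, generalized over a completion
 * predicate.  Both the helper and the callback are hypothetical.
 */
#if 0
static s32 e1000_example_poll_flcdone(struct e1000_hw *hw,
				      bool (*done)(struct e1000_hw *),
				      u32 timeout)
{
	u32 i;

	/* Poll once per microsecond until done or the timeout expires. */
	for (i = 0; i < timeout; i++) {
		if (done(hw))
			return E1000_SUCCESS;
		usec_delay(1);
	}
	return -E1000_ERR_NVM;
}
#endif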
3807 
3808 /**
3809  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3810  *  @hw: pointer to the HW structure
3811  *  @offset: offset to data location
3812  *  @data: pointer to the location for storing the data
3813  *
3814  *  Reads the flash dword at offset into data.  Offset is converted
3815  *  to bytes before read.
3816  **/
3817 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3818 					  u32 *data)
3819 {
3820 	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3821 
3822 	if (!data)
3823 		return -E1000_ERR_NVM;
3824 
3825 	/* Must convert word offset into bytes. */
3826 	offset <<= 1;
3827 
3828 	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3829 }
3830 
3831 /**
3832  *  e1000_read_flash_word_ich8lan - Read word from flash
3833  *  @hw: pointer to the HW structure
3834  *  @offset: offset to data location
3835  *  @data: pointer to the location for storing the data
3836  *
3837  *  Reads the flash word at offset into data.  Offset is converted
3838  *  to bytes before read.
3839  **/
3840 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3841 					 u16 *data)
3842 {
3843 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3844 
3845 	if (!data)
3846 		return -E1000_ERR_NVM;
3847 
3848 	/* Must convert offset into bytes. */
3849 	offset <<= 1;
3850 
3851 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3852 }
3853 
3854 /**
3855  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3856  *  @hw: pointer to the HW structure
3857  *  @offset: The offset of the byte to read.
3858  *  @data: Pointer to a byte to store the value read.
3859  *
3860  *  Reads a single byte from the NVM using the flash access registers.
3861  **/
3862 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3863 					 u8 *data)
3864 {
3865 	s32 ret_val;
3866 	u16 word = 0;
3867 
3868 	/* On SPT and newer, only 32-bit flash access is supported,
3869 	 * so this function should not be called.
3870 	 */
3871 	if (hw->mac.type >= e1000_pch_spt)
3872 		return -E1000_ERR_NVM;
3873 	else
3874 		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3875 
3876 	if (ret_val)
3877 		return ret_val;
3878 
3879 	*data = (u8)word;
3880 
3881 	return E1000_SUCCESS;
3882 }
3883 
3884 /**
3885  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3886  *  @hw: pointer to the HW structure
3887  *  @offset: The offset (in bytes) of the byte or word to read.
3888  *  @size: Size of data to read, 1=byte 2=word
3889  *  @data: Pointer to the word to store the value read.
3890  *
3891  *  Reads a byte or word from the NVM using the flash access registers.
3892  **/
3893 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3894 					 u8 size, u16 *data)
3895 {
3896 	union ich8_hws_flash_status hsfsts;
3897 	union ich8_hws_flash_ctrl hsflctl;
3898 	u32 flash_linear_addr;
3899 	u32 flash_data = 0;
3900 	s32 ret_val = -E1000_ERR_NVM;
3901 	u8 count = 0;
3902 
3903 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3904 
3905 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3906 		return -E1000_ERR_NVM;
3907 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3908 			     hw->nvm.flash_base_addr);
3909 
3910 	do {
3911 		usec_delay(1);
3912 		/* Steps */
3913 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3914 		if (ret_val != E1000_SUCCESS)
3915 			break;
3916 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3917 
3918 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3919 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3920 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3921 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3922 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3923 
3924 		ret_val = e1000_flash_cycle_ich8lan(hw,
3925 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3926 
3927 		/* If FCERR is set, clear it and retry the whole
3928 		 * sequence a few more times; otherwise read Flash
3929 		 * Data0, which is returned least significant byte
3930 		 * first.
3931 		 */
3932 		if (ret_val == E1000_SUCCESS) {
3933 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3934 			if (size == 1)
3935 				*data = (u8)(flash_data & 0x000000FF);
3936 			else if (size == 2)
3937 				*data = (u16)(flash_data & 0x0000FFFF);
3938 			break;
3939 		} else {
3940 			/* If we've gotten here, then things are probably
3941 			 * completely hosed, but if the error condition is
3942 			 * detected, it won't hurt to give it another try...
3943 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3944 			 */
3945 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3946 							      ICH_FLASH_HSFSTS);
3947 			if (hsfsts.hsf_status.flcerr) {
3948 				/* Repeat for some time before giving up. */
3949 				continue;
3950 			} else if (!hsfsts.hsf_status.flcdone) {
3951 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3952 				break;
3953 			}
3954 		}
3955 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3956 
3957 	return ret_val;
3958 }
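
/*
 * Illustrative sketch (compiled out): the outer retry policy shared by
 * the flash read/write helpers in this file.  A cycle error (FCERR) is
 * retried up to ICH_FLASH_CYCLE_REPEAT_COUNT times, while a cycle that
 * never completes (FLCDONE clear) is treated as a hard timeout.  The
 * helper name is hypothetical and the cycle itself is elided.
 */
#if 0
static s32 e1000_example_cycle_with_retries(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	do {
		/* ... set up and run one flash cycle, setting ret_val ... */
		if (ret_val == E1000_SUCCESS)
			break;
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr)
			continue;	/* retryable cycle error */
		if (!hsfsts.hsf_status.flcdone)
			break;		/* hard timeout */
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
#endif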
3959 
3960 /**
3961  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3962  *  @hw: pointer to the HW structure
3963  *  @offset: The offset (in bytes) of the dword to read.
3964  *  @data: Pointer to the dword to store the value read.
3965  *
3966  *  Reads a dword from the NVM using the flash access registers.
3967  **/
3968 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3969 					   u32 *data)
3970 {
3971 	union ich8_hws_flash_status hsfsts;
3972 	union ich8_hws_flash_ctrl hsflctl;
3973 	u32 flash_linear_addr;
3974 	s32 ret_val = -E1000_ERR_NVM;
3975 	u8 count = 0;
3976 
3977 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3978 
3979 	if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3980 	    hw->mac.type < e1000_pch_spt)
3981 		return -E1000_ERR_NVM;
3982 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3983 			     hw->nvm.flash_base_addr);
3984 
3985 	do {
3986 		usec_delay(1);
3987 		/* Steps */
3988 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3989 		if (ret_val != E1000_SUCCESS)
3990 			break;
3991 		/* In SPT, this register is in LAN memory space, not flash,
3992 		 * so only 32-bit access is supported.
3993 		 */
3994 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3995 
3996 		/* A fldbcount of 3 (sizeof(u32) - 1) selects a 4-byte transfer. */
3997 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3998 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3999 		/* In SPT, this register is in LAN memory space, not flash,
4000 		 * so only 32-bit access is supported.
4001 		 */
4002 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4003 				      (u32)hsflctl.regval << 16);
4004 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4005 
4006 		ret_val = e1000_flash_cycle_ich8lan(hw,
4007 						ICH_FLASH_READ_COMMAND_TIMEOUT);
4008 
4009 		/* If FCERR is set, clear it and retry the whole
4010 		 * sequence a few more times; otherwise read Flash
4011 		 * Data0, which is returned least significant byte
4012 		 * first.
4013 		 */
4014 		if (ret_val == E1000_SUCCESS) {
4015 			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
4016 			break;
4017 		} else {
4018 			/* If we've gotten here, then things are probably
4019 			 * completely hosed, but if the error condition is
4020 			 * detected, it won't hurt to give it another try...
4021 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
4022 			 */
4023 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4024 							      ICH_FLASH_HSFSTS);
4025 			if (hsfsts.hsf_status.flcerr) {
4026 				/* Repeat for some time before giving up. */
4027 				continue;
4028 			} else if (!hsfsts.hsf_status.flcdone) {
4029 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4030 				break;
4031 			}
4032 		}
4033 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4034 
4035 	return ret_val;
4036 }
4037 
4038 /**
4039  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
4040  *  @hw: pointer to the HW structure
4041  *  @offset: The offset (in words) of the word(s) to write.
4042  *  @words: Size of data to write in words
4043  *  @data: Pointer to the word(s) to write at offset.
4044  *
4045  *  Writes the word(s) to the shadow RAM; the update_nvm_checksum op commits them to flash.
4046  **/
4047 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4048 				   u16 *data)
4049 {
4050 	struct e1000_nvm_info *nvm = &hw->nvm;
4051 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4052 	u16 i;
4053 
4054 	DEBUGFUNC("e1000_write_nvm_ich8lan");
4055 
4056 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4057 	    (words == 0)) {
4058 		DEBUGOUT("nvm parameter(s) out of bounds\n");
4059 		return -E1000_ERR_NVM;
4060 	}
4061 
4062 	nvm->ops.acquire(hw);
4063 
4064 	for (i = 0; i < words; i++) {
4065 		dev_spec->shadow_ram[offset + i].modified = true;
4066 		dev_spec->shadow_ram[offset + i].value = data[i];
4067 	}
4068 
4069 	nvm->ops.release(hw);
4070 
4071 	return E1000_SUCCESS;
4072 }
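
/*
 * Illustrative sketch (compiled out): because e1000_write_nvm_ich8lan
 * only updates the shadow RAM, a caller must also invoke the checksum
 * update op for the data to reach the flash.  Hypothetical usage:
 */
#if 0
static s32 e1000_example_write_word_and_commit(struct e1000_hw *hw,
					       u16 offset, u16 word)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.write(hw, offset, 1, &word);
	if (ret_val)
		return ret_val;

	/* Commits the shadow RAM to flash and revalidates the bank. */
	return hw->nvm.ops.update(hw);
}
#endif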
4073 
4074 /**
4075  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4076  *  @hw: pointer to the HW structure
4077  *
4078  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4079  *  which writes the checksum to the shadow ram.  The changes in the shadow
4080  *  ram are then committed to the EEPROM by processing each bank at a time
4081  *  checking for the modified bit and writing only the pending changes.
4082  *  After a successful commit, the shadow ram is cleared and is ready for
4083  *  future writes.
4084  **/
4085 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4086 {
4087 	struct e1000_nvm_info *nvm = &hw->nvm;
4088 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4089 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4090 	s32 ret_val;
4091 	u32 dword = 0;
4092 
4093 	DEBUGFUNC("e1000_update_nvm_checksum_spt");
4094 
4095 	ret_val = e1000_update_nvm_checksum_generic(hw);
4096 	if (ret_val)
4097 		goto out;
4098 
4099 	if (nvm->type != e1000_nvm_flash_sw)
4100 		goto out;
4101 
4102 	nvm->ops.acquire(hw);
4103 
4104 	/* We're writing to the opposite bank so if we're on bank 1,
4105 	 * write to bank 0 etc.  We also need to erase the segment that
4106 	 * is going to be written
4107 	 */
4108 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4109 	if (ret_val != E1000_SUCCESS) {
4110 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4111 		bank = 0;
4112 	}
4113 
4114 	if (bank == 0) {
4115 		new_bank_offset = nvm->flash_bank_size;
4116 		old_bank_offset = 0;
4117 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4118 		if (ret_val)
4119 			goto release;
4120 	} else {
4121 		old_bank_offset = nvm->flash_bank_size;
4122 		new_bank_offset = 0;
4123 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4124 		if (ret_val)
4125 			goto release;
4126 	}
4127 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4128 		/* Determine whether to write the value stored
4129 		 * in the other NVM bank or a modified value stored
4130 		 * in the shadow RAM
4131 		 */
4132 		ret_val = e1000_read_flash_dword_ich8lan(hw,
4133 							 i + old_bank_offset,
4134 							 &dword);
4135 
4136 		if (dev_spec->shadow_ram[i].modified) {
4137 			dword &= 0xffff0000;
4138 			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4139 		}
4140 		if (dev_spec->shadow_ram[i + 1].modified) {
4141 			dword &= 0x0000ffff;
4142 			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4143 				  << 16);
4144 		}
4145 		if (ret_val)
4146 			break;
4147 
4148 		/* If this dword contains the signature word (0x13), make
4149 		 * sure its signature bits (15:14) stay 11b until the commit
4150 		 * has completed.  This will allow us to write 10b, which
4151 		 * indicates the signature is valid.  We want to do this
4152 		 * after the write has completed so that we don't mark the
4153 		 * segment valid while the write is still in progress.
4154 		 */
4155 		if (i == E1000_ICH_NVM_SIG_WORD - 1)
4156 			dword |= E1000_ICH_NVM_SIG_MASK << 16;
4157 
4158 		/* Delay briefly, then write the data to the new bank.
4159 		 * The offset here is in words;
4160 		 * e1000_retry_write_flash_dword_ich8lan converts it
4161 		 * to bytes internally.
4162 		 */
4163 		usec_delay(100);
4164 		act_offset = i + new_bank_offset;
4165 		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4166 								dword);
4167 		if (ret_val)
4168 			break;
4169 	 }
4170 
4171 	/* Don't bother writing the segment valid bits if sector
4172 	 * programming failed.
4173 	 */
4174 	if (ret_val) {
4175 		DEBUGOUT("Flash commit failed.\n");
4176 		goto release;
4177 	}
4178 
4179 	/* Finally validate the new segment by setting bits 15:14
4180 	 * to 10b in word 0x13.  This can be done without an erase
4181 	 * because these bits start out as 11b and we only need to
4182 	 * clear bit 14 to 0b.
4183 	 */
4184 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4185 
4186 	/* The offset is in words but we read a dword. */
4187 	--act_offset;
4188 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4189 
4190 	if (ret_val)
4191 		goto release;
4192 
4193 	dword &= 0xBFFFFFFF;
4194 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4195 
4196 	if (ret_val)
4197 		goto release;
4198 
4199 	/* The offset is in words but we read a dword. */
4200 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4201 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4202 
4203 	if (ret_val)
4204 		goto release;
4205 
4206 	dword &= 0x00FFFFFF;
4207 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4208 
4209 	if (ret_val)
4210 		goto release;
4211 
4212 	/* Great!  Everything worked, we can now clear the cached entries. */
4213 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4214 		dev_spec->shadow_ram[i].modified = false;
4215 		dev_spec->shadow_ram[i].value = 0xFFFF;
4216 	}
4217 
4218 release:
4219 	nvm->ops.release(hw);
4220 
4221 	/* Reload the EEPROM, or else modifications will not appear
4222 	 * until after the next adapter reset.
4223 	 */
4224 	if (!ret_val) {
4225 		nvm->ops.reload(hw);
4226 		msec_delay(10);
4227 	}
4228 
4229 out:
4230 	if (ret_val)
4231 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4232 
4233 	return ret_val;
4234 }
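
/*
 * Illustrative sketch (compiled out): how the SPT commit path above
 * packs two 16-bit shadow-RAM words into one flash dword - the even
 * word occupies bits 15:0 and the odd word bits 31:16.  The helper
 * name is hypothetical.
 */
#if 0
static u32 e1000_example_pack_flash_dword(u16 even_word, u16 odd_word)
{
	return (u32)even_word | ((u32)odd_word << 16);
}
#endif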
4235 
4236 /**
4237  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4238  *  @hw: pointer to the HW structure
4239  *
4240  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4241  *  which writes the checksum to the shadow ram.  The changes in the shadow
4242  *  ram are then committed to the EEPROM by processing each bank at a time
4243  *  checking for the modified bit and writing only the pending changes.
4244  *  After a successful commit, the shadow ram is cleared and is ready for
4245  *  future writes.
4246  **/
4247 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4248 {
4249 	struct e1000_nvm_info *nvm = &hw->nvm;
4250 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4251 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4252 	s32 ret_val;
4253 	u16 data = 0;
4254 
4255 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4256 
4257 	ret_val = e1000_update_nvm_checksum_generic(hw);
4258 	if (ret_val)
4259 		goto out;
4260 
4261 	if (nvm->type != e1000_nvm_flash_sw)
4262 		goto out;
4263 
4264 	nvm->ops.acquire(hw);
4265 
4266 	/* We're writing to the opposite bank so if we're on bank 1,
4267 	 * write to bank 0 etc.  We also need to erase the segment that
4268 	 * is going to be written
4269 	 */
4270 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4271 	if (ret_val != E1000_SUCCESS) {
4272 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4273 		bank = 0;
4274 	}
4275 
4276 	if (bank == 0) {
4277 		new_bank_offset = nvm->flash_bank_size;
4278 		old_bank_offset = 0;
4279 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4280 		if (ret_val)
4281 			goto release;
4282 	} else {
4283 		old_bank_offset = nvm->flash_bank_size;
4284 		new_bank_offset = 0;
4285 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4286 		if (ret_val)
4287 			goto release;
4288 	}
4289 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4290 		if (dev_spec->shadow_ram[i].modified) {
4291 			data = dev_spec->shadow_ram[i].value;
4292 		} else {
4293 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4294 								old_bank_offset,
4295 								&data);
4296 			if (ret_val)
4297 				break;
4298 		}
4299 		/* If the word is 0x13, then make sure the signature bits
4300 		 * (15:14) are 11b until the commit has completed.
4301 		 * This will allow us to write 10b which indicates the
4302 		 * signature is valid.  We want to do this after the write
4303 		 * has completed so that we don't mark the segment valid
4304 		 * while the write is still in progress
4305 		 */
4306 		if (i == E1000_ICH_NVM_SIG_WORD)
4307 			data |= E1000_ICH_NVM_SIG_MASK;
4308 
4309 		/* Convert offset to bytes. */
4310 		act_offset = (i + new_bank_offset) << 1;
4311 
4312 		usec_delay(100);
4313 
4314 		/* Write the bytes to the new bank. */
4315 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4316 							       act_offset,
4317 							       (u8)data);
4318 		if (ret_val)
4319 			break;
4320 
4321 		usec_delay(100);
4322 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4323 							  act_offset + 1,
4324 							  (u8)(data >> 8));
4325 		if (ret_val)
4326 			break;
4327 	}
4328 
4329 	/* Don't bother writing the segment valid bits if sector
4330 	 * programming failed.
4331 	 */
4332 	if (ret_val) {
4333 		DEBUGOUT("Flash commit failed.\n");
4334 		goto release;
4335 	}
4336 
4337 	/* Finally validate the new segment by setting bits 15:14
4338 	 * to 10b in word 0x13.  This can be done without an erase
4339 	 * because these bits start out as 11b and we only need to
4340 	 * clear bit 14 to 0b.
4341 	 */
4342 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4343 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4344 	if (ret_val)
4345 		goto release;
4346 
4347 	data &= 0xBFFF;
4348 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4349 						       (u8)(data >> 8));
4350 	if (ret_val)
4351 		goto release;
4352 
4353 	/* And invalidate the previously valid segment by setting
4354 	 * the high byte of its signature word (0x13) to 0.  This can
4355 	 * be done without an erase because a flash erase sets all
4356 	 * bits to 1 and bits can always be programmed from 1 to 0.
4357 	 */
4358 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4359 
4360 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4361 
4362 	if (ret_val)
4363 		goto release;
4364 
4365 	/* Great!  Everything worked, we can now clear the cached entries. */
4366 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4367 		dev_spec->shadow_ram[i].modified = false;
4368 		dev_spec->shadow_ram[i].value = 0xFFFF;
4369 	}
4370 
4371 release:
4372 	nvm->ops.release(hw);
4373 
4374 	/* Reload the EEPROM, or else modifications will not appear
4375 	 * until after the next adapter reset.
4376 	 */
4377 	if (!ret_val) {
4378 		nvm->ops.reload(hw);
4379 		msec_delay(10);
4380 	}
4381 
4382 out:
4383 	if (ret_val)
4384 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4385 
4386 	return ret_val;
4387 }
4388 
4389 /**
4390  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4391  *  @hw: pointer to the HW structure
4392  *
4393  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
4394  *  If the bit is 0, the EEPROM had been modified but the checksum was not
4395  *  calculated; in that case, calculate the checksum and set bit 6.
4396  **/
4397 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4398 {
4399 	s32 ret_val;
4400 	u16 data;
4401 	u16 word;
4402 	u16 valid_csum_mask;
4403 
4404 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4405 
4406 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4407 	 * the checksum needs to be fixed.  This bit is an indication that
4408 	 * the NVM was prepared by OEM software and did not calculate
4409 	 * the checksum...a likely scenario.
4410 	 */
4411 	switch (hw->mac.type) {
4412 	case e1000_pch_lpt:
4413 	case e1000_pch_spt:
4414 	case e1000_pch_cnp:
4415 	case e1000_pch_tgp:
4416 	case e1000_pch_adp:
4417 	case e1000_pch_mtp:
4418 	case e1000_pch_ptp:
4419 		word = NVM_COMPAT;
4420 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4421 		break;
4422 	default:
4423 		word = NVM_FUTURE_INIT_WORD1;
4424 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4425 		break;
4426 	}
4427 
4428 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4429 	if (ret_val)
4430 		return ret_val;
4431 
4432 	if (!(data & valid_csum_mask)) {
4433 		data |= valid_csum_mask;
4434 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4435 		if (ret_val)
4436 			return ret_val;
4437 		ret_val = hw->nvm.ops.update(hw);
4438 		if (ret_val)
4439 			return ret_val;
4440 	}
4441 
4442 	return e1000_validate_nvm_checksum_generic(hw);
4443 }
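
/*
 * Illustrative sketch (compiled out): the rule enforced by
 * e1000_validate_nvm_checksum_generic, assuming the NVM_CHECKSUM_REG
 * (0x3F) and NVM_SUM (0xBABA) definitions from the shared e1000
 * headers.  The words from offset 0 through the checksum word must
 * sum, modulo 2^16, to NVM_SUM.
 */
#if 0
static bool e1000_example_nvm_csum_ok(const u16 *words)
{
	u16 sum = 0;
	u32 i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += words[i];
	return sum == NVM_SUM;
}
#endif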
4444 
4445 /**
4446  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4447  *  @hw: pointer to the HW structure
4448  *  @offset: The offset (in bytes) of the byte/word to write.
4449  *  @size: Size of data to write, 1=byte 2=word
4450  *  @data: The byte(s) to write to the NVM.
4451  *
4452  *  Writes one/two bytes to the NVM using the flash access registers.
4453  **/
4454 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4455 					  u8 size, u16 data)
4456 {
4457 	union ich8_hws_flash_status hsfsts;
4458 	union ich8_hws_flash_ctrl hsflctl;
4459 	u32 flash_linear_addr;
4460 	u32 flash_data = 0;
4461 	s32 ret_val;
4462 	u8 count = 0;
4463 
4464 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
4465 
4466 	if (hw->mac.type >= e1000_pch_spt) {
4467 		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4468 			return -E1000_ERR_NVM;
4469 	} else {
4470 		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4471 			return -E1000_ERR_NVM;
4472 	}
4473 
4474 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4475 			     hw->nvm.flash_base_addr);
4476 
4477 	do {
4478 		usec_delay(1);
4479 		/* Steps */
4480 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4481 		if (ret_val != E1000_SUCCESS)
4482 			break;
4483 		/* In SPT, this register is in LAN memory space, not
4484 		 * flash, so only 32-bit access is supported.
4485 		 */
4486 		if (hw->mac.type >= e1000_pch_spt)
4487 			hsflctl.regval =
4488 			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4489 		else
4490 			hsflctl.regval =
4491 			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4492 
4493 		/* fldbcount = size - 1: 0b/1b/11b selects a 1-, 2-, or 4-byte transfer. */
4494 		hsflctl.hsf_ctrl.fldbcount = size - 1;
4495 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4496 		/* In SPT, this register is in LAN memory space,
4497 		 * not flash, so only 32-bit access is
4498 		 * supported.
4499 		 */
4500 		if (hw->mac.type >= e1000_pch_spt)
4501 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4502 					      hsflctl.regval << 16);
4503 		else
4504 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4505 						hsflctl.regval);
4506 
4507 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4508 
4509 		if (size == 1)
4510 			flash_data = (u32)data & 0x00FF;
4511 		else
4512 			flash_data = (u32)data;
4513 
4514 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4515 
4516 		/* If FCERR is set, clear it and retry the whole
4517 		 * sequence a few more times; otherwise we are done.
4518 		 */
4519 		ret_val =
4520 		    e1000_flash_cycle_ich8lan(hw,
4521 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4522 		if (ret_val == E1000_SUCCESS)
4523 			break;
4524 
4525 		/* If we're here, then things are most likely
4526 		 * completely hosed, but if the error condition
4527 		 * is detected, it won't hurt to give it another
4528 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4529 		 */
4530 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4531 		if (hsfsts.hsf_status.flcerr)
4532 			/* Repeat for some time before giving up. */
4533 			continue;
4534 		if (!hsfsts.hsf_status.flcdone) {
4535 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4536 			break;
4537 		}
4538 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4539 
4540 	return ret_val;
4541 }
4542 
4543 /**
4544  *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4545  *  @hw: pointer to the HW structure
4546  *  @offset: The offset (in bytes) of the dword to write.
4547  *  @data: The 4 bytes to write to the NVM.
4548  *
4549  *  Writes a dword to the NVM using the flash access registers.
4550  **/
4551 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4552 					    u32 data)
4553 {
4554 	union ich8_hws_flash_status hsfsts;
4555 	union ich8_hws_flash_ctrl hsflctl;
4556 	u32 flash_linear_addr;
4557 	s32 ret_val;
4558 	u8 count = 0;
4559 
4560 	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4561 
4562 	if (hw->mac.type >= e1000_pch_spt) {
4563 		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4564 			return -E1000_ERR_NVM;
4565 	}
4566 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4567 			     hw->nvm.flash_base_addr);
4568 	do {
4569 		usec_delay(1);
4570 		/* Steps */
4571 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4572 		if (ret_val != E1000_SUCCESS)
4573 			break;
4574 
4575 		/* In SPT, this register is in LAN memory space, not
4576 		 * flash, so only 32-bit access is supported.
4577 		 */
4578 		if (hw->mac.type >= e1000_pch_spt)
4579 			hsflctl.regval = E1000_READ_FLASH_REG(hw,
4580 							      ICH_FLASH_HSFSTS)
4581 					 >> 16;
4582 		else
4583 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4584 							      ICH_FLASH_HSFCTL);
4585 
4586 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4587 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4588 
4589 		/* In SPT, this register is in LAN memory space,
4590 		 * not flash, so only 32-bit access is
4591 		 * supported.
4592 		 */
4593 		if (hw->mac.type >= e1000_pch_spt)
4594 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4595 					      hsflctl.regval << 16);
4596 		else
4597 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4598 						hsflctl.regval);
4599 
4600 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4601 
4602 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4603 
4604 		/* If FCERR is set, clear it and retry the whole
4605 		 * sequence a few more times; otherwise we are done.
4606 		 */
4607 		ret_val = e1000_flash_cycle_ich8lan(hw,
4608 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4609 
4610 		if (ret_val == E1000_SUCCESS)
4611 			break;
4612 
4613 		/* If we're here, then things are most likely
4614 		 * completely hosed, but if the error condition
4615 		 * is detected, it won't hurt to give it another
4616 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4617 		 */
4618 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4619 
4620 		if (hsfsts.hsf_status.flcerr)
4621 			/* Repeat for some time before giving up. */
4622 			continue;
4623 		if (!hsfsts.hsf_status.flcdone) {
4624 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4625 			break;
4626 		}
4627 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4628 
4629 	return ret_val;
4630 }
4631 
4632 /**
4633  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4634  *  @hw: pointer to the HW structure
4635  *  @offset: The offset of the byte to write.
4636  *  @data: The byte to write to the NVM.
4637  *
4638  *  Writes a single byte to the NVM using the flash access registers.
4639  **/
4640 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4641 					  u8 data)
4642 {
4643 	u16 word = (u16)data;
4644 
4645 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4646 
4647 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4648 }
4649 
4650 /**
4651  *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4652  *  @hw: pointer to the HW structure
4653  *  @offset: The offset (in words) of the dword to write.
4654  *  @dword: The dword to write to the NVM.
4655  *
4656  *  Writes a single dword to the NVM using the flash access registers.
4657  *  Goes through a retry algorithm before giving up.
4658  **/
4659 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4660 						 u32 offset, u32 dword)
4661 {
4662 	s32 ret_val;
4663 	u16 program_retries;
4664 
4665 	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4666 
4667 	/* Must convert word offset into bytes. */
4668 	offset <<= 1;
4669 
4670 	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4671 
4672 	if (!ret_val)
4673 		return ret_val;
4674 	for (program_retries = 0; program_retries < 100; program_retries++) {
4675 		DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4676 		usec_delay(100);
4677 		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4678 		if (ret_val == E1000_SUCCESS)
4679 			break;
4680 	}
4681 	if (program_retries == 100)
4682 		return -E1000_ERR_NVM;
4683 
4684 	return E1000_SUCCESS;
4685 }
4686 
4687 /**
4688  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4689  *  @hw: pointer to the HW structure
4690  *  @offset: The offset of the byte to write.
4691  *  @byte: The byte to write to the NVM.
4692  *
4693  *  Writes a single byte to the NVM using the flash access registers.
4694  *  Goes through a retry algorithm before giving up.
4695  **/
4696 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4697 						u32 offset, u8 byte)
4698 {
4699 	s32 ret_val;
4700 	u16 program_retries;
4701 
4702 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4703 
4704 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4705 	if (!ret_val)
4706 		return ret_val;
4707 
4708 	for (program_retries = 0; program_retries < 100; program_retries++) {
4709 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4710 		usec_delay(100);
4711 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4712 		if (ret_val == E1000_SUCCESS)
4713 			break;
4714 	}
4715 	if (program_retries == 100)
4716 		return -E1000_ERR_NVM;
4717 
4718 	return E1000_SUCCESS;
4719 }
4720 
4721 /**
4722  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4723  *  @hw: pointer to the HW structure
4724  *  @bank: 0 for first bank, 1 for second bank, etc.
4725  *
4726  *  Erases the specified bank.  Each bank is a 4k block; banks are 0-based.
4727  *  Bank N starts at 4096 * N + flash_reg_addr.
4728  **/
4729 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4730 {
4731 	struct e1000_nvm_info *nvm = &hw->nvm;
4732 	union ich8_hws_flash_status hsfsts;
4733 	union ich8_hws_flash_ctrl hsflctl;
4734 	u32 flash_linear_addr;
4735 	/* bank size is in 16-bit words - adjust to bytes */
4736 	u32 flash_bank_size = nvm->flash_bank_size * 2;
4737 	s32 ret_val;
4738 	s32 count = 0;
4739 	s32 j, iteration, sector_size;
4740 
4741 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4742 
4743 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4744 
4745 	/* Determine HW Sector size: Read BERASE bits of hw flash status
4746 	 * register
4747 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4748 	 *     consecutive sectors.  The start index for the nth Hw sector
4749 	 *     can be calculated as = bank * 4096 + n * 256
4750 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4751 	 *     The start index for the nth Hw sector can be calculated
4752 	 *     as = bank * 4096
4753 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4754 	 *     (ich9 only, otherwise error condition)
4755 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4756 	 */
4757 	switch (hsfsts.hsf_status.berasesz) {
4758 	case 0:
4759 		/* Hw sector size 256 */
4760 		sector_size = ICH_FLASH_SEG_SIZE_256;
4761 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4762 		break;
4763 	case 1:
4764 		sector_size = ICH_FLASH_SEG_SIZE_4K;
4765 		iteration = 1;
4766 		break;
4767 	case 2:
4768 		sector_size = ICH_FLASH_SEG_SIZE_8K;
4769 		iteration = 1;
4770 		break;
4771 	case 3:
4772 		sector_size = ICH_FLASH_SEG_SIZE_64K;
4773 		iteration = 1;
4774 		break;
4775 	default:
4776 		return -E1000_ERR_NVM;
4777 	}
4778 
4779 	/* Start with the base address, then add the sector offset. */
4780 	flash_linear_addr = hw->nvm.flash_base_addr;
4781 	flash_linear_addr += (bank) ? flash_bank_size : 0;
4782 
4783 	for (j = 0; j < iteration; j++) {
4784 		do {
4785 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4786 
4787 			/* Steps */
4788 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4789 			if (ret_val)
4790 				return ret_val;
4791 
4792 			/* Write a value of 11b (Block Erase) to the
4793 			 * Flash Cycle field in the HW flash control
4794 			 */
4795 			if (hw->mac.type >= e1000_pch_spt)
4796 				hsflctl.regval =
4797 				    E1000_READ_FLASH_REG(hw,
4798 							 ICH_FLASH_HSFSTS)>>16;
4799 			else
4800 				hsflctl.regval =
4801 				    E1000_READ_FLASH_REG16(hw,
4802 							   ICH_FLASH_HSFCTL);
4803 
4804 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4805 			if (hw->mac.type >= e1000_pch_spt)
4806 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4807 						      hsflctl.regval << 16);
4808 			else
4809 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4810 							hsflctl.regval);
4811 
4812 			/* Write the last 24 bits of an index within the
4813 			 * block into Flash Linear address field in Flash
4814 			 * Address.
4815 			 */
4816 			flash_linear_addr += (j * sector_size);
4817 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4818 					      flash_linear_addr);
4819 
4820 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4821 			if (ret_val == E1000_SUCCESS)
4822 				break;
4823 
4824 			/* If FCERR is set, clear it and retry
4825 			 * the whole sequence a few more times;
4826 			 * otherwise we are done.
4827 			 */
4828 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4829 						      ICH_FLASH_HSFSTS);
4830 			if (hsfsts.hsf_status.flcerr)
4831 				/* repeat for some time before giving up */
4832 				continue;
4833 			else if (!hsfsts.hsf_status.flcdone)
4834 				return ret_val;
4835 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4836 	}
4837 
4838 	return E1000_SUCCESS;
4839 }
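
/*
 * Illustrative sketch (compiled out): the bank base-address math used
 * above.  nvm->flash_bank_size is in 16-bit words, so it is doubled to
 * get a byte offset.  The helper name is hypothetical.
 */
#if 0
static u32 e1000_example_bank_base_addr(struct e1000_hw *hw, u32 bank)
{
	return hw->nvm.flash_base_addr +
	       bank * (hw->nvm.flash_bank_size * 2);
}
#endif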
4840 
4841 /**
4842  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4843  *  @hw: pointer to the HW structure
4844  *  @data: Pointer to the LED settings
4845  *
4846  *  Reads the LED default settings from the NVM into data.  If the NVM LED
4847  *  setting is all 0's or F's, set the LED default to a valid LED default
4848  *  setting.
4849  **/
4850 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4851 {
4852 	s32 ret_val;
4853 
4854 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4855 
4856 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4857 	if (ret_val) {
4858 		DEBUGOUT("NVM Read Error\n");
4859 		return ret_val;
4860 	}
4861 
4862 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4863 		*data = ID_LED_DEFAULT_ICH8LAN;
4864 
4865 	return E1000_SUCCESS;
4866 }
4867 
4868 /**
4869  *  e1000_id_led_init_pchlan - store LED configurations
4870  *  @hw: pointer to the HW structure
4871  *
4872  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4873  *  the PHY LED configuration register.
4874  *
4875  *  PCH also does not have an "always on" or "always off" mode which
4876  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4877  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4878  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4879  *  link based on logic in e1000_led_[on|off]_pchlan().
4880  **/
4881 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4882 {
4883 	struct e1000_mac_info *mac = &hw->mac;
4884 	s32 ret_val;
4885 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4886 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4887 	u16 data, i, temp, shift;
4888 
4889 	DEBUGFUNC("e1000_id_led_init_pchlan");
4890 
4891 	/* Get default ID LED modes */
4892 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4893 	if (ret_val)
4894 		return ret_val;
4895 
4896 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4897 	mac->ledctl_mode1 = mac->ledctl_default;
4898 	mac->ledctl_mode2 = mac->ledctl_default;
4899 
4900 	for (i = 0; i < 4; i++) {
4901 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4902 		shift = (i * 5);
4903 		switch (temp) {
4904 		case ID_LED_ON1_DEF2:
4905 		case ID_LED_ON1_ON2:
4906 		case ID_LED_ON1_OFF2:
4907 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4908 			mac->ledctl_mode1 |= (ledctl_on << shift);
4909 			break;
4910 		case ID_LED_OFF1_DEF2:
4911 		case ID_LED_OFF1_ON2:
4912 		case ID_LED_OFF1_OFF2:
4913 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4914 			mac->ledctl_mode1 |= (ledctl_off << shift);
4915 			break;
4916 		default:
4917 			/* Do nothing */
4918 			break;
4919 		}
4920 		switch (temp) {
4921 		case ID_LED_DEF1_ON2:
4922 		case ID_LED_ON1_ON2:
4923 		case ID_LED_OFF1_ON2:
4924 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4925 			mac->ledctl_mode2 |= (ledctl_on << shift);
4926 			break;
4927 		case ID_LED_DEF1_OFF2:
4928 		case ID_LED_ON1_OFF2:
4929 		case ID_LED_OFF1_OFF2:
4930 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4931 			mac->ledctl_mode2 |= (ledctl_off << shift);
4932 			break;
4933 		default:
4934 			/* Do nothing */
4935 			break;
4936 		}
4937 	}
4938 
4939 	return E1000_SUCCESS;
4940 }
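
/*
 * Illustrative sketch (compiled out): how the loop above slices the NVM
 * ID-LED word.  LED i's 4-bit init mode sits at bit (i * 4) of the NVM
 * word, while its 5-bit field in the PHY LED configuration register
 * sits at bit (i * 5).  The helper name is hypothetical.
 */
#if 0
static u16 e1000_example_led_init_mode(u16 nvm_word, u16 i)
{
	return (nvm_word >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
}
#endif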
4941 
4942 /**
4943  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4944  *  @hw: pointer to the HW structure
4945  *
4946  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4947  *  register, so the bus width is hardcoded.
4948  **/
4949 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4950 {
4951 	struct e1000_bus_info *bus = &hw->bus;
4952 	s32 ret_val;
4953 
4954 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4955 
4956 	ret_val = e1000_get_bus_info_pcie_generic(hw);
4957 
4958 	/* ICH devices are "PCI Express"-ish.  They have
4959 	 * a configuration space, but do not contain
4960 	 * PCI Express Capability registers, so bus width
4961 	 * must be hardcoded.
4962 	 */
4963 	if (bus->width == e1000_bus_width_unknown)
4964 		bus->width = e1000_bus_width_pcie_x1;
4965 
4966 	return ret_val;
4967 }
4968 
4969 /**
4970  *  e1000_reset_hw_ich8lan - Reset the hardware
4971  *  @hw: pointer to the HW structure
4972  *
4973  *  Does a full reset of the hardware which includes a reset of the PHY and
4974  *  MAC.
4975  **/
4976 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4977 {
4978 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4979 	u16 kum_cfg;
4980 	u32 ctrl, reg;
4981 	s32 ret_val;
4982 	u16 pci_cfg;
4983 
4984 	DEBUGFUNC("e1000_reset_hw_ich8lan");
4985 
4986 	/* Prevent the PCI-E bus from hanging if there is no TLP connection
4987 	 * on the last TLP read/write transaction when the MAC is reset.
4988 	 */
4989 	ret_val = e1000_disable_pcie_master_generic(hw);
4990 	if (ret_val)
4991 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4992 
4993 	DEBUGOUT("Masking off all interrupts\n");
4994 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4995 
4996 	/* Disable the Transmit and Receive units.  Then delay to allow
4997 	 * any pending transactions to complete before we hit the MAC
4998 	 * with the global reset.
4999 	 */
5000 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
5001 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
5002 	E1000_WRITE_FLUSH(hw);
5003 
5004 	msec_delay(10);
5005 
5006 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
5007 	if (hw->mac.type == e1000_ich8lan) {
5008 		/* Set Tx and Rx buffer allocation to 8k apiece. */
5009 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
5010 		/* Set Packet Buffer Size to 16k. */
5011 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
5012 	}
5013 
5014 	if (hw->mac.type == e1000_pchlan) {
5015 		/* Save the NVM K1 bit setting*/
5016 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
5017 		if (ret_val)
5018 			return ret_val;
5019 
5020 		if (kum_cfg & E1000_NVM_K1_ENABLE)
5021 			dev_spec->nvm_k1_enabled = true;
5022 		else
5023 			dev_spec->nvm_k1_enabled = false;
5024 	}
5025 
5026 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5027 
5028 	if (!hw->phy.ops.check_reset_block(hw)) {
5029 		/* Full-chip reset requires MAC and PHY reset at the same
5030 		 * time to make sure the interface between MAC and the
5031 		 * external PHY is reset.
5032 		 */
5033 		ctrl |= E1000_CTRL_PHY_RST;
5034 
5035 		/* Gate automatic PHY configuration by hardware on
5036 		 * non-managed 82579
5037 		 */
5038 		if ((hw->mac.type == e1000_pch2lan) &&
5039 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5040 			e1000_gate_hw_phy_config_ich8lan(hw, true);
5041 	}
5042 	ret_val = e1000_acquire_swflag_ich8lan(hw);
5043 
5044 	/* A read from EXTCNF_CTRL in e1000_acquire_swflag_ich8lan may
5045 	 * occur during global reset and cause a system hang.
5046 	 * A configuration space access creates the needed delay.
5047 	 * Writing the E1000_PCI_VENDOR_ID_REGISTER value to the read-only
5048 	 * E1000_STRAP register ensures the read completes before the reset.
5049 	 */
5050 	e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5051 	E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5052 	DEBUGOUT("Issuing a global reset to ich8lan\n");
5053 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5054 	/* cannot issue a flush here because it hangs the hardware */
5055 	msec_delay(20);
5056 
5057 	/* A configuration space access improves the HW-level time sync
5058 	 * mechanism.  Writing the E1000_PCI_VENDOR_ID_REGISTER value to
5059 	 * the read-only E1000_STRAP register ensures the configuration
5060 	 * space read completes before any MAC register access.
5061 	 */
5062 	e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5063 	E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5064 
5065 	/* Set Phy Config Counter to 50msec */
5066 	if (hw->mac.type == e1000_pch2lan) {
5067 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5068 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5069 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5070 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5071 	}
5072 
5073 
5074 	if (ctrl & E1000_CTRL_PHY_RST) {
5075 		ret_val = hw->phy.ops.get_cfg_done(hw);
5076 		if (ret_val)
5077 			return ret_val;
5078 
5079 		ret_val = e1000_post_phy_reset_ich8lan(hw);
5080 		if (ret_val)
5081 			return ret_val;
5082 	}
5083 
5084 	/* For PCH, this write will make sure that any noise
5085 	 * will be detected as a CRC error and be dropped rather than show up
5086 	 * as a bad packet to the DMA engine.
5087 	 */
5088 	if (hw->mac.type == e1000_pchlan)
5089 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5090 
5091 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5092 	E1000_READ_REG(hw, E1000_ICR);
5093 
5094 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
5095 	reg |= E1000_KABGTXD_BGSQLBIAS;
5096 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5097 
5098 	return E1000_SUCCESS;
5099 }
5100 
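/* Illustrative sketch, not part of the driver: how a caller might drive
 * the reset-then-init sequence through the shared-code ops table, assuming
 * e1000_setup_init_funcs() has already populated hw->mac.ops.  The
 * E1000_EXAMPLE_SNIPPETS guard is hypothetical and keeps the sketch out of
 * real builds.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static s32 example_reset_and_init(struct e1000_hw *hw)
{
	s32 ret_val;

	/* Global reset; on ICH8 this dispatches to
	 * e1000_reset_hw_ich8lan() above.
	 */
	ret_val = hw->mac.ops.reset_hw(hw);
	if (ret_val)
		return ret_val;

	/* Bring the MAC back up (e1000_init_hw_ich8lan() below). */
	return hw->mac.ops.init_hw(hw);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
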
5101 /**
5102  *  e1000_init_hw_ich8lan - Initialize the hardware
5103  *  @hw: pointer to the HW structure
5104  *
5105  *  Prepares the hardware for transmit and receive by doing the following:
5106  *   - initialize hardware bits
5107  *   - initialize LED identification
5108  *   - setup receive address registers
5109  *   - setup flow control
5110  *   - setup transmit descriptors
5111  *   - clear statistics
5112  **/
5113 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5114 {
5115 	struct e1000_mac_info *mac = &hw->mac;
5116 	u32 ctrl_ext, txdctl, snoop;
5117 	s32 ret_val;
5118 	u16 i;
5119 
5120 	DEBUGFUNC("e1000_init_hw_ich8lan");
5121 
5122 	e1000_initialize_hw_bits_ich8lan(hw);
5123 
5124 	/* Initialize identification LED */
5125 	ret_val = mac->ops.id_led_init(hw);
5126 	/* An error is not fatal and we should not stop init due to this */
5127 	if (ret_val)
5128 		DEBUGOUT("Error initializing identification LED\n");
5129 
5130 	/* Setup the receive address. */
5131 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5132 
5133 	/* Zero out the Multicast HASH table */
5134 	DEBUGOUT("Zeroing the MTA\n");
5135 	for (i = 0; i < mac->mta_reg_count; i++)
5136 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5137 
5138 	/* The 82578 Rx buffer will stall if wakeup is enabled in both the
5139 	 * host and the ME.  Disable wakeup by clearing the host wakeup bit.
5140 	 * Reset the PHY after disabling host wakeup to reset the Rx buffer.
5141 	 */
5142 	if (hw->phy.type == e1000_phy_82578) {
5143 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5144 		i &= ~BM_WUC_HOST_WU_BIT;
5145 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5146 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
5147 		if (ret_val)
5148 			return ret_val;
5149 	}
5150 
5151 	/* Setup link and flow control */
5152 	ret_val = mac->ops.setup_link(hw);
5153 
5154 	/* Set the transmit descriptor write-back policy for both queues */
5155 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5156 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5157 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5158 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5159 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5160 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5161 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5162 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5163 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5164 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5165 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5166 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5167 
5168 	/* ICH8 has opposite polarity of no_snoop bits.
5169 	 * By default, we should use snoop behavior.
5170 	 */
5171 	if (mac->type == e1000_ich8lan)
5172 		snoop = PCIE_ICH8_SNOOP_ALL;
5173 	else
5174 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5175 	e1000_set_pcie_no_snoop_generic(hw, snoop);
5176 
5177 	/* ungate DMA clock to avoid packet loss */
5178 	if (mac->type >= e1000_pch_tgp) {
5179 		uint32_t fflt_dbg = E1000_READ_REG(hw, E1000_FFLT_DBG);
5180 		fflt_dbg |= (1 << 12);
5181 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, fflt_dbg);
5182 	}
5183 
5184 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5185 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5186 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5187 
5188 	/* Clear all of the statistics registers (clear on read).  It is
5189 	 * important that we do this after we have tried to establish link
5190 	 * because the symbol error count will increment wildly if there
5191 	 * is no link.
5192 	 */
5193 	e1000_clear_hw_cntrs_ich8lan(hw);
5194 
5195 	return ret_val;
5196 }
5197 
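/* Illustrative sketch, not part of the driver: the TXDCTL write-back
 * policy programming done twice in e1000_init_hw_ich8lan() above, factored
 * into one hypothetical helper to show the read-modify-write pattern per
 * Tx queue.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static void example_set_txdctl_wb_policy(struct e1000_hw *hw, int queue)
{
	u32 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(queue));

	/* Full descriptor write-back, maximum descriptor prefetch */
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(queue), txdctl);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
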
5198 /**
5199  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5200  *  @hw: pointer to the HW structure
5201  *
5202  *  Sets/Clears the hardware bits necessary for correctly setting up the
5203  *  hardware for transmit and receive.
5204  **/
5205 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5206 {
5207 	u32 reg;
5208 
5209 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5210 
5211 	/* Extended Device Control */
5212 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5213 	reg |= (1 << 22);
5214 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5215 	if (hw->mac.type >= e1000_pchlan)
5216 		reg |= E1000_CTRL_EXT_PHYPDEN;
5217 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5218 
5219 	/* Transmit Descriptor Control 0 */
5220 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5221 	reg |= (1 << 22);
5222 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5223 
5224 	/* Transmit Descriptor Control 1 */
5225 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5226 	reg |= (1 << 22);
5227 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5228 
5229 	/* Transmit Arbitration Control 0 */
5230 	reg = E1000_READ_REG(hw, E1000_TARC(0));
5231 	if (hw->mac.type == e1000_ich8lan)
5232 		reg |= (1 << 28) | (1 << 29);
5233 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5234 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5235 
5236 	/* Transmit Arbitration Control 1 */
5237 	reg = E1000_READ_REG(hw, E1000_TARC(1));
5238 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5239 		reg &= ~(1 << 28);
5240 	else
5241 		reg |= (1 << 28);
5242 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5243 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5244 
5245 	/* Device Status */
5246 	if (hw->mac.type == e1000_ich8lan) {
5247 		reg = E1000_READ_REG(hw, E1000_STATUS);
5248 		reg &= ~(1U << 31);
5249 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5250 	}
5251 
5252 	/* Work around a descriptor data corruption issue seen with NFSv2 UDP
5253 	 * traffic by simply disabling the NFS filtering capability.
5254 	 */
5255 	reg = E1000_READ_REG(hw, E1000_RFCTL);
5256 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5257 
5258 	/* Disable IPv6 extension header parsing because some malformed
5259 	 * IPv6 headers can hang the Rx.
5260 	 */
5261 	if (hw->mac.type == e1000_ich8lan)
5262 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5263 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5264 
5265 	/* Enable ECC on Lynxpoint */
5266 	if (hw->mac.type >= e1000_pch_lpt) {
5267 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5268 		reg |= E1000_PBECCSTS_ECC_ENABLE;
5269 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5270 
5271 		reg = E1000_READ_REG(hw, E1000_CTRL);
5272 		reg |= E1000_CTRL_MEHE;
5273 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5274 	}
5275 
5276 	return;
5277 }
5278 
5279 /**
5280  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5281  *  @hw: pointer to the HW structure
5282  *
5283  *  Determines which flow control settings to use, then configures flow
5284  *  control.  Calls the appropriate media-specific link configuration
5285  *  function.  Assuming the adapter has a valid link partner, a valid link
5286  *  should be established.  Assumes the hardware has previously been reset
5287  *  and the transmitter and receiver are not enabled.
5288  **/
5289 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5290 {
5291 	s32 ret_val;
5292 
5293 	DEBUGFUNC("e1000_setup_link_ich8lan");
5294 
5295 	/* ICH parts do not have a word in the NVM to determine
5296 	 * the default flow control setting, so we explicitly
5297 	 * set it to full.
5298 	 */
5299 	if (hw->fc.requested_mode == e1000_fc_default)
5300 		hw->fc.requested_mode = e1000_fc_full;
5301 
5302 	/* Save off the requested flow control mode for use later.  Depending
5303 	 * on the link partner's capabilities, we may or may not use this mode.
5304 	 */
5305 	hw->fc.current_mode = hw->fc.requested_mode;
5306 
5307 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5308 		hw->fc.current_mode);
5309 
5310 	if (!hw->phy.ops.check_reset_block(hw)) {
5311 		/* Continue to configure the copper link. */
5312 		ret_val = hw->mac.ops.setup_physical_interface(hw);
5313 		if (ret_val)
5314 			return ret_val;
5315 	}
5316 
5317 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5318 	if ((hw->phy.type == e1000_phy_82578) ||
5319 	    (hw->phy.type == e1000_phy_82579) ||
5320 	    (hw->phy.type == e1000_phy_i217) ||
5321 	    (hw->phy.type == e1000_phy_82577)) {
5322 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5323 
5324 		ret_val = hw->phy.ops.write_reg(hw,
5325 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5326 					     hw->fc.pause_time);
5327 		if (ret_val)
5328 			return ret_val;
5329 	}
5330 
5331 	return e1000_set_fc_watermarks_generic(hw);
5332 }
5333 
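/* Hypothetical usage sketch: e1000_setup_link_ich8lan() only overrides
 * e1000_fc_default, so a caller can request a specific flow-control mode
 * beforehand.  This is an assumption-labeled example, not driver code.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static s32 example_request_rx_pause(struct e1000_hw *hw)
{
	/* Honor received PAUSE frames only; do not send any. */
	hw->fc.requested_mode = e1000_fc_rx_pause;
	return hw->mac.ops.setup_link(hw);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
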
5334 /**
5335  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5336  *  @hw: pointer to the HW structure
5337  *
5338  *  Configures the Kumeran interface to the PHY to wait the appropriate time
5339  *  when polling the PHY, then calls the generic setup_copper_link to finish
5340  *  configuring the copper link.
5341  **/
5342 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5343 {
5344 	u32 ctrl;
5345 	s32 ret_val;
5346 	u16 reg_data;
5347 
5348 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5349 
5350 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5351 	ctrl |= E1000_CTRL_SLU;
5352 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5353 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5354 
5355 	/* Set the MAC to wait the maximum time between each iteration
5356 	 * and increase the max iterations when polling the PHY;
5357 	 * this fixes erroneous timeouts at 10 Mbps.
5358 	 */
5359 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5360 					       0xFFFF);
5361 	if (ret_val)
5362 		return ret_val;
5363 	ret_val = e1000_read_kmrn_reg_generic(hw,
5364 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5365 					      &reg_data);
5366 	if (ret_val)
5367 		return ret_val;
5368 	reg_data |= 0x3F;
5369 	ret_val = e1000_write_kmrn_reg_generic(hw,
5370 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5371 					       reg_data);
5372 	if (ret_val)
5373 		return ret_val;
5374 
5375 	switch (hw->phy.type) {
5376 	case e1000_phy_igp_3:
5377 		ret_val = e1000_copper_link_setup_igp(hw);
5378 		if (ret_val)
5379 			return ret_val;
5380 		break;
5381 	case e1000_phy_bm:
5382 	case e1000_phy_82578:
5383 		ret_val = e1000_copper_link_setup_m88(hw);
5384 		if (ret_val)
5385 			return ret_val;
5386 		break;
5387 	case e1000_phy_82577:
5388 	case e1000_phy_82579:
5389 		ret_val = e1000_copper_link_setup_82577(hw);
5390 		if (ret_val)
5391 			return ret_val;
5392 		break;
5393 	case e1000_phy_ife:
5394 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5395 					       &reg_data);
5396 		if (ret_val)
5397 			return ret_val;
5398 
5399 		reg_data &= ~IFE_PMC_AUTO_MDIX;
5400 
5401 		switch (hw->phy.mdix) {
5402 		case 1:
5403 			reg_data &= ~IFE_PMC_FORCE_MDIX;
5404 			break;
5405 		case 2:
5406 			reg_data |= IFE_PMC_FORCE_MDIX;
5407 			break;
5408 		case 0:
5409 		default:
5410 			reg_data |= IFE_PMC_AUTO_MDIX;
5411 			break;
5412 		}
5413 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5414 						reg_data);
5415 		if (ret_val)
5416 			return ret_val;
5417 		break;
5418 	default:
5419 		break;
5420 	}
5421 
5422 	return e1000_setup_copper_link_generic(hw);
5423 }
5424 
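/* Hypothetical usage sketch: forcing crossover on an IFE PHY before link
 * setup.  The switch above interprets hw->phy.mdix as 0 = auto, 1 = MDI,
 * 2 = MDI-X; the helper below is illustrative only.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static s32 example_force_mdix(struct e1000_hw *hw)
{
	hw->phy.mdix = 2;	/* force MDI-X */
	return hw->mac.ops.setup_physical_interface(hw);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
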
5425 /**
5426  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5427  *  @hw: pointer to the HW structure
5428  *
5429  *  Calls the PHY specific link setup function and then calls the
5430  *  generic setup_copper_link to finish configuring the link for
5431  *  Lynxpoint PCH devices.
5432  **/
5433 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5434 {
5435 	u32 ctrl;
5436 	s32 ret_val;
5437 
5438 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5439 
5440 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5441 	ctrl |= E1000_CTRL_SLU;
5442 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5443 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5444 
5445 	ret_val = e1000_copper_link_setup_82577(hw);
5446 	if (ret_val)
5447 		return ret_val;
5448 
5449 	return e1000_setup_copper_link_generic(hw);
5450 }
5451 
5452 /**
5453  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5454  *  @hw: pointer to the HW structure
5455  *  @speed: pointer to store current link speed
5456  *  @duplex: pointer to store the current link duplex
5457  *
5458  *  Calls the generic get_speed_and_duplex to retrieve the current link
5459  *  information and then calls the Kumeran lock loss workaround for links at
5460  *  gigabit speeds.
5461  **/
5462 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5463 					  u16 *duplex)
5464 {
5465 	s32 ret_val;
5466 
5467 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5468 
5469 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5470 	if (ret_val)
5471 		return ret_val;
5472 
5473 	if ((hw->mac.type == e1000_ich8lan) &&
5474 	    (hw->phy.type == e1000_phy_igp_3) &&
5475 	    (*speed == SPEED_1000)) {
5476 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5477 	}
5478 
5479 	return ret_val;
5480 }
5481 
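/* Illustrative sketch, not part of the driver: reading back the current
 * speed/duplex through the ops table, which on ICH8 IGP3 parts also runs
 * the Kumeran lock-loss workaround above when at gigabit speed.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static void example_log_link_info(struct e1000_hw *hw)
{
	u16 speed, duplex;

	if (hw->mac.ops.get_link_up_info(hw, &speed, &duplex) ==
	    E1000_SUCCESS)
		DEBUGOUT2("link is %d Mb/s, duplex %d\n", speed, duplex);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
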
5482 /**
5483  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5484  *  @hw: pointer to the HW structure
5485  *
5486  *  Work-around for 82566 Kumeran PCS lock loss:
5487  *  On a link status change (e.g. PCI reset, speed change) while the link
5488  *  is up at gigabit speed:
5489  *    0) if the workaround has been disabled, do nothing
5490  *    1) wait 1ms for Kumeran link to come up
5491  *    2) check Kumeran Diagnostic register PCS lock loss bit
5492  *    3) if not set the link is locked (all is good), otherwise...
5493  *    4) reset the PHY
5494  *    5) repeat up to 10 times
5495  *  Note: this is only called for IGP3 copper when the speed is 1 Gb/s.
5496  **/
5497 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5498 {
5499 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5500 	u32 phy_ctrl;
5501 	s32 ret_val;
5502 	u16 i, data;
5503 	bool link;
5504 
5505 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5506 
5507 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5508 		return E1000_SUCCESS;
5509 
5510 	/* Make sure link is up before proceeding; if not, just return.
5511 	 * Attempting this while the link is negotiating fouls up link
5512 	 * stability.
5513 	 */
5514 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5515 	if (!link)
5516 		return E1000_SUCCESS;
5517 
5518 	for (i = 0; i < 10; i++) {
5519 		/* read once to clear */
5520 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5521 		if (ret_val)
5522 			return ret_val;
5523 		/* and again to get new status */
5524 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5525 		if (ret_val)
5526 			return ret_val;
5527 
5528 		/* check for PCS lock */
5529 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5530 			return E1000_SUCCESS;
5531 
5532 		/* Issue PHY reset */
5533 		hw->phy.ops.reset(hw);
5534 		msec_delay_irq(5);
5535 	}
5536 	/* Disable GigE link negotiation */
5537 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5538 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5539 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5540 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5541 
5542 	/* Call gig speed drop workaround on Gig disable before accessing
5543 	 * any PHY registers
5544 	 */
5545 	e1000_gig_downshift_workaround_ich8lan(hw);
5546 
5547 	/* unable to acquire PCS lock */
5548 	return -E1000_ERR_PHY;
5549 }
5550 
5551 /**
5552  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5553  *  @hw: pointer to the HW structure
5554  *  @state: boolean value used to set the current Kumeran workaround state
5555  *
5556  *  If ICH8, set the current Kumeran workaround state (enabled - true
5557  *  If ICH8, set the current Kumeran workaround state (enabled = true,
5558  *  disabled = false).
5559 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5560 						 bool state)
5561 {
5562 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5563 
5564 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5565 
5566 	if (hw->mac.type != e1000_ich8lan) {
5567 		DEBUGOUT("Workaround applies to ICH8 only.\n");
5568 		return;
5569 	}
5570 
5571 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5572 
5573 	return;
5574 }
5575 
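/* Hypothetical usage sketch: an attach path opting in to the Kumeran
 * lock-loss workaround.  Calls on non-ICH8 parts are rejected by the
 * type check above, so the guard here is belt-and-suspenders.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static void example_enable_kmrn_workaround(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
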
5576 /**
5577  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5578  *  @hw: pointer to the HW structure
5579  *
5580  *  Workaround for 82566 power-down on D3 entry:
5581  *    1) disable gigabit link
5582  *    2) write VR power-down enable
5583  *    3) read it back
5584  *  Continue if successful, else issue LCD reset and repeat
5585  **/
5586 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5587 {
5588 	u32 reg;
5589 	u16 data;
5590 	u8  retry = 0;
5591 
5592 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5593 
5594 	if (hw->phy.type != e1000_phy_igp_3)
5595 		return;
5596 
5597 	/* Try the workaround twice (if needed) */
5598 	do {
5599 		/* Disable link */
5600 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5601 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5602 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5603 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5604 
5605 		/* Call gig speed drop workaround on Gig disable before
5606 		 * accessing any PHY registers
5607 		 */
5608 		if (hw->mac.type == e1000_ich8lan)
5609 			e1000_gig_downshift_workaround_ich8lan(hw);
5610 
5611 		/* Write VR power-down enable */
5612 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5613 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5614 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5615 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5616 
5617 		/* Read it back and test */
5618 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5619 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5620 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5621 			break;
5622 
5623 		/* Issue PHY reset and repeat at most one more time */
5624 		reg = E1000_READ_REG(hw, E1000_CTRL);
5625 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5626 		retry++;
5627 	} while (retry);
5628 }
5629 
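/* Hypothetical usage sketch: a D3/suspend path applying the IGP3 VR
 * power-down workaround above; the function is a no-op for non-IGP3 PHYs.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static void example_prepare_d3(struct e1000_hw *hw)
{
	e1000_igp3_phy_powerdown_workaround_ich8lan(hw);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
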
5630 /**
5631  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5632  *  @hw: pointer to the HW structure
5633  *
5634  *  Steps to take when dropping from 1 Gb/s (e.g. link cable removal (LSC),
5635  *  LPLU, Gig disable, MDIC PHY reset):
5636  *    1) Set Kumeran Near-end loopback
5637  *    2) Clear Kumeran Near-end loopback
5638  *  Should only be called for ICH8[m] devices with any 1G Phy.
5639  **/
5640 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5641 {
5642 	s32 ret_val;
5643 	u16 reg_data = 0;
5644 
5645 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5646 
5647 	if ((hw->mac.type != e1000_ich8lan) ||
5648 	    (hw->phy.type == e1000_phy_ife))
5649 		return;
5650 
5651 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5652 					      &reg_data);
5653 	if (ret_val)
5654 		return;
5655 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5656 	ret_val = e1000_write_kmrn_reg_generic(hw,
5657 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5658 					       reg_data);
5659 	if (ret_val)
5660 		return;
5661 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5662 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5663 				     reg_data);
5664 }
5665 
5666 /**
5667  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5668  *  @hw: pointer to the HW structure
5669  *
5670  *  During S0 to Sx transition, it is possible the link remains at gig
5671  *  instead of negotiating to a lower speed.  Before going to Sx, set
5672  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5673  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5674  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5675  *  needs to be written.
5676  *  Parts that support (and are linked to a partner which support) EEE in
5677  *  Parts that support (and are linked to a partner which supports) EEE in
5678  *  than 10Mbps w/o EEE.
5679  **/
5680 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5681 {
5682 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5683 	u32 phy_ctrl;
5684 	s32 ret_val;
5685 
5686 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5687 
5688 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5689 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5690 
5691 	if (hw->phy.type == e1000_phy_i217) {
5692 		u16 phy_reg, device_id = hw->device_id;
5693 
5694 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5695 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5696 		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5697 		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5698 		    (hw->mac.type >= e1000_pch_spt)) {
5699 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5700 
5701 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5702 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5703 		}
5704 
5705 		ret_val = hw->phy.ops.acquire(hw);
5706 		if (ret_val)
5707 			goto out;
5708 
5709 		if (!dev_spec->eee_disable) {
5710 			u16 eee_advert;
5711 
5712 			ret_val =
5713 			    e1000_read_emi_reg_locked(hw,
5714 						      I217_EEE_ADVERTISEMENT,
5715 						      &eee_advert);
5716 			if (ret_val)
5717 				goto release;
5718 
5719 			/* Disable LPLU if both link partners support 100BaseT
5720 			 * EEE and 100Full is advertised on both ends of the
5721 			 * link, and enable Auto Enable LPI since there will
5722 			 * be no driver to enable LPI while in Sx.
5723 			 */
5724 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5725 			    (dev_spec->eee_lp_ability &
5726 			     I82579_EEE_100_SUPPORTED) &&
5727 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5728 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5729 					      E1000_PHY_CTRL_NOND0A_LPLU);
5730 
5731 				/* Set Auto Enable LPI after link up */
5732 				hw->phy.ops.read_reg_locked(hw,
5733 							    I217_LPI_GPIO_CTRL,
5734 							    &phy_reg);
5735 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5736 				hw->phy.ops.write_reg_locked(hw,
5737 							     I217_LPI_GPIO_CTRL,
5738 							     phy_reg);
5739 			}
5740 		}
5741 
5742 		/* For i217 Intel Rapid Start Technology support,
5743 		 * when the system is going into Sx and no manageability engine
5744 		 * is present, the driver must configure proxy to reset only on
5745 		 * power good.  LPI (Low Power Idle) state must also reset only
5746 		 * on power good, as well as the MTA (Multicast table array).
5747 		 * The SMBus release must also be disabled on LCD reset.
5748 		 */
5749 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5750 		      E1000_ICH_FWSM_FW_VALID)) {
5751 			/* Enable proxy to reset only on power good. */
5752 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5753 						    &phy_reg);
5754 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5755 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5756 						     phy_reg);
5757 
5758 			/* Set the LPI (EEE) enable bit to reset only on
5759 			 * power good.
5760 			 */
5761 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5762 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5763 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5764 
5765 			/* Disable the SMB release on LCD reset. */
5766 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5767 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5768 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5769 		}
5770 
5771 		/* Enable MTA to reset for Intel Rapid Start Technology
5772 		 * Support
5773 		 */
5774 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5775 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5776 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5777 
5778 release:
5779 		hw->phy.ops.release(hw);
5780 	}
5781 out:
5782 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5783 
5784 	if (hw->mac.type == e1000_ich8lan)
5785 		e1000_gig_downshift_workaround_ich8lan(hw);
5786 
5787 	if (hw->mac.type >= e1000_pchlan) {
5788 		e1000_oem_bits_config_ich8lan(hw, false);
5789 
5790 		/* Reset PHY to activate OEM bits on 82577/8 */
5791 		if (hw->mac.type == e1000_pchlan)
5792 			e1000_phy_hw_reset_generic(hw);
5793 
5794 		ret_val = hw->phy.ops.acquire(hw);
5795 		if (ret_val)
5796 			return;
5797 		e1000_write_smbus_addr(hw);
5798 		hw->phy.ops.release(hw);
5799 	}
5800 
5801 	return;
5802 }
5803 
5804 /**
5805  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5806  *  @hw: pointer to the HW structure
5807  *
5808  *  During Sx to S0 transitions on non-managed devices or managed devices
5809  *  on which PHY resets are not blocked, if the PHY registers cannot be
5810  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5811  *  the PHY.
5812  *  On i217, setup Intel Rapid Start Technology.
5813  **/
5814 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5815 {
5816 	s32 ret_val;
5817 
5818 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5819 	if (hw->mac.type < e1000_pch2lan)
5820 		return E1000_SUCCESS;
5821 
5822 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5823 	if (ret_val) {
5824 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5825 		return ret_val;
5826 	}
5827 
5828 	/* For i217 Intel Rapid Start Technology support, when the system
5829 	 * is transitioning from Sx and no manageability engine is present,
5830 	 * configure SMBus to restore on reset, disable proxy, and enable
5831 	 * the reset on MTA (Multicast table array).
5832 	 */
5833 	if (hw->phy.type == e1000_phy_i217) {
5834 		u16 phy_reg;
5835 
5836 		ret_val = hw->phy.ops.acquire(hw);
5837 		if (ret_val) {
5838 			DEBUGOUT("Failed to setup iRST\n");
5839 			return ret_val;
5840 		}
5841 
5842 		/* Clear Auto Enable LPI after link up */
5843 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5844 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5845 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5846 
5847 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5848 		    E1000_ICH_FWSM_FW_VALID)) {
5849 			/* Restore clear on SMB if no manageability engine
5850 			 * is present
5851 			 */
5852 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5853 							      &phy_reg);
5854 			if (ret_val)
5855 				goto release;
5856 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5857 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5858 
5859 			/* Disable Proxy */
5860 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5861 		}
5862 		/* Enable reset on MTA */
5863 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5864 						      &phy_reg);
5865 		if (ret_val)
5866 			goto release;
5867 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5868 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5869 release:
5870 		if (ret_val)
5871 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5872 		hw->phy.ops.release(hw);
5873 		return ret_val;
5874 	}
5875 	return E1000_SUCCESS;
5876 }
5877 
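/* Illustrative sketch, not part of the driver: how the S0->Sx and Sx->S0
 * workarounds above pair up around a sleep cycle in a hypothetical
 * suspend/resume path.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static u32 example_sx_cycle(struct e1000_hw *hw)
{
	e1000_suspend_workarounds_ich8lan(hw);
	/* ... the system sleeps and later wakes ... */
	return e1000_resume_workarounds_pchlan(hw);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
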
5878 /**
5879  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5880  *  @hw: pointer to the HW structure
5881  *
5882  *  Return the LED back to the default configuration.
5883  **/
5884 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5885 {
5886 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5887 
5888 	if (hw->phy.type == e1000_phy_ife)
5889 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5890 					     0);
5891 
5892 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5893 	return E1000_SUCCESS;
5894 }
5895 
5896 /**
5897  *  e1000_led_on_ich8lan - Turn LEDs on
5898  *  @hw: pointer to the HW structure
5899  *
5900  *  Turn on the LEDs.
5901  **/
5902 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5903 {
5904 	DEBUGFUNC("e1000_led_on_ich8lan");
5905 
5906 	if (hw->phy.type == e1000_phy_ife)
5907 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5908 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5909 
5910 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5911 	return E1000_SUCCESS;
5912 }
5913 
5914 /**
5915  *  e1000_led_off_ich8lan - Turn LEDs off
5916  *  @hw: pointer to the HW structure
5917  *
5918  *  Turn off the LEDs.
5919  **/
5920 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5921 {
5922 	DEBUGFUNC("e1000_led_off_ich8lan");
5923 
5924 	if (hw->phy.type == e1000_phy_ife)
5925 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5926 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5927 
5928 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5929 	return E1000_SUCCESS;
5930 }
5931 
5932 /**
5933  *  e1000_setup_led_pchlan - Configures SW controllable LED
5934  *  @hw: pointer to the HW structure
5935  *
5936  *  This prepares the SW controllable LED for use.
5937  **/
5938 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5939 {
5940 	DEBUGFUNC("e1000_setup_led_pchlan");
5941 
5942 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5943 				     (u16)hw->mac.ledctl_mode1);
5944 }
5945 
5946 /**
5947  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5948  *  @hw: pointer to the HW structure
5949  *
5950  *  Return the LED back to the default configuration.
5951  **/
5952 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5953 {
5954 	DEBUGFUNC("e1000_cleanup_led_pchlan");
5955 
5956 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5957 				     (u16)hw->mac.ledctl_default);
5958 }
5959 
5960 /**
5961  *  e1000_led_on_pchlan - Turn LEDs on
5962  *  @hw: pointer to the HW structure
5963  *
5964  *  Turn on the LEDs.
5965  **/
5966 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5967 {
5968 	u16 data = (u16)hw->mac.ledctl_mode2;
5969 	u32 i, led;
5970 
5971 	DEBUGFUNC("e1000_led_on_pchlan");
5972 
5973 	/* If no link, then turn LED on by setting the invert bit
5974 	 * for each LED whose mode is "link_up" in ledctl_mode2.
5975 	 */
5976 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5977 		for (i = 0; i < 3; i++) {
5978 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5979 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5980 			    E1000_LEDCTL_MODE_LINK_UP)
5981 				continue;
5982 			if (led & E1000_PHY_LED0_IVRT)
5983 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5984 			else
5985 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5986 		}
5987 	}
5988 
5989 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5990 }
5991 
5992 /**
5993  *  e1000_led_off_pchlan - Turn LEDs off
5994  *  @hw: pointer to the HW structure
5995  *
5996  *  Turn off the LEDs.
5997  **/
5998 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5999 {
6000 	u16 data = (u16)hw->mac.ledctl_mode1;
6001 	u32 i, led;
6002 
6003 	DEBUGFUNC("e1000_led_off_pchlan");
6004 
6005 	/* If no link, then turn LED off by clearing the invert bit
6006 	 * for each LED whose mode is "link_up" in ledctl_mode1.
6007 	 */
6008 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
6009 		for (i = 0; i < 3; i++) {
6010 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
6011 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
6012 			    E1000_LEDCTL_MODE_LINK_UP)
6013 				continue;
6014 			if (led & E1000_PHY_LED0_IVRT)
6015 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
6016 			else
6017 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
6018 		}
6019 	}
6020 
6021 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
6022 }
6023 
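/* Illustrative sketch of the HV_LED_CONFIG layout assumed by the two
 * functions above: three LEDs, five bits each, a mode field plus an
 * invert bit.  Hypothetical helper, not a documented API.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static u16 example_led_field(u16 ledctl, u32 led_idx)
{
	/* Extract the 5-bit field for LED 0, 1 or 2. */
	return (u16)((ledctl >> (led_idx * 5)) & E1000_PHY_LED0_MASK);
}
#endif /* E1000_EXAMPLE_SNIPPETS */
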
6024 /**
6025  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
6026  *  @hw: pointer to the HW structure
6027  *
6028  *  Read appropriate register for the config done bit for completion status
6029  *  and configure the PHY through s/w for EEPROM-less parts.
6030  *
6031  *  NOTE: some EEPROM-less silicon will fail trying to read the config done
6032  *  bit, so only an error is logged and execution continues.  If we were to
6033  *  return with an error, EEPROM-less silicon could neither be reset nor
6034  *  change link.
6035  **/
6036 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
6037 {
6038 	s32 ret_val = E1000_SUCCESS;
6039 	u32 bank = 0;
6040 	u32 status;
6041 
6042 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
6043 
6044 	e1000_get_cfg_done_generic(hw);
6045 
6046 	/* Wait for indication from h/w that it has completed basic config */
6047 	if (hw->mac.type >= e1000_ich10lan) {
6048 		e1000_lan_init_done_ich8lan(hw);
6049 	} else {
6050 		ret_val = e1000_get_auto_rd_done_generic(hw);
6051 		if (ret_val) {
6052 			/* When the auto config read does not complete, do not
6053 			 * return with an error.  This can happen when there is
6054 			 * no EEPROM, and an error would prevent getting link.
6055 			 */
6056 			DEBUGOUT("Auto Read Done did not complete\n");
6057 			ret_val = E1000_SUCCESS;
6058 		}
6059 	}
6060 
6061 	/* Clear PHY Reset Asserted bit */
6062 	status = E1000_READ_REG(hw, E1000_STATUS);
6063 	if (status & E1000_STATUS_PHYRA)
6064 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
6065 	else
6066 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6067 
6068 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
6069 	if (hw->mac.type <= e1000_ich9lan) {
6070 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6071 		    (hw->phy.type == e1000_phy_igp_3)) {
6072 			e1000_phy_init_script_igp3(hw);
6073 		}
6074 	} else {
6075 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6076 			/* Maybe we should do a basic PHY config */
6077 			DEBUGOUT("EEPROM not present\n");
6078 			ret_val = -E1000_ERR_CONFIG;
6079 		}
6080 	}
6081 
6082 	return ret_val;
6083 }
6084 
6085 /**
6086  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6087  * @hw: pointer to the HW structure
6088  *
6089  * In the case of a PHY power down to save power, to turn off link during a
6090  * driver unload, or when wake on LAN is not enabled, remove the link.
6091  **/
6092 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6093 {
6094 	/* If the management interface is not enabled, then power down */
6095 	if (!(hw->mac.ops.check_mng_mode(hw) ||
6096 	      hw->phy.ops.check_reset_block(hw)))
6097 		e1000_power_down_phy_copper(hw);
6098 
6099 	return;
6100 }
6101 
6102 /**
6103  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6104  *  @hw: pointer to the HW structure
6105  *
6106  *  Clears hardware counters specific to the silicon family and calls
6107  *  clear_hw_cntrs_generic to clear all general purpose counters.
6108  **/
6109 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6110 {
6111 	u16 phy_data;
6112 	s32 ret_val;
6113 
6114 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6115 
6116 	e1000_clear_hw_cntrs_base_generic(hw);
6117 
6118 	E1000_READ_REG(hw, E1000_ALGNERRC);
6119 	E1000_READ_REG(hw, E1000_RXERRC);
6120 	E1000_READ_REG(hw, E1000_TNCRS);
6121 	E1000_READ_REG(hw, E1000_CEXTERR);
6122 	E1000_READ_REG(hw, E1000_TSCTC);
6123 	E1000_READ_REG(hw, E1000_TSCTFC);
6124 
6125 	E1000_READ_REG(hw, E1000_MGTPRC);
6126 	E1000_READ_REG(hw, E1000_MGTPDC);
6127 	E1000_READ_REG(hw, E1000_MGTPTC);
6128 
6129 	E1000_READ_REG(hw, E1000_IAC);
6130 	E1000_READ_REG(hw, E1000_ICRXOC);
6131 
6132 	/* Clear PHY statistics registers */
6133 	if ((hw->phy.type == e1000_phy_82578) ||
6134 	    (hw->phy.type == e1000_phy_82579) ||
6135 	    (hw->phy.type == e1000_phy_i217) ||
6136 	    (hw->phy.type == e1000_phy_82577)) {
6137 		ret_val = hw->phy.ops.acquire(hw);
6138 		if (ret_val)
6139 			return;
6140 		ret_val = hw->phy.ops.set_page(hw,
6141 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
6142 		if (ret_val)
6143 			goto release;
6144 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6145 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6146 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6147 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6148 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6149 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6150 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6151 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6152 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6153 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6154 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6155 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6156 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6157 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6158 release:
6159 		hw->phy.ops.release(hw);
6160 	}
6161 }
6162 
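/* Illustrative sketch, not part of the driver: the UPPER/LOWER PHY
 * statistics pairs above are cleared by reading both halves.  Assumes the
 * PHY is acquired and the HV statistics page is already selected, as in
 * e1000_clear_hw_cntrs_ich8lan() above.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static u16 example_clear_phy_stat(struct e1000_hw *hw, u32 upper, u32 lower)
{
	u16 phy_data = 0;

	hw->phy.ops.read_reg_page(hw, upper, &phy_data);
	hw->phy.ops.read_reg_page(hw, lower, &phy_data);
	return phy_data;	/* value read from the LOWER half */
}
#endif /* E1000_EXAMPLE_SNIPPETS */
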
6163 /**
6164  *  e1000_configure_k0s_lpt - Configure K0s power state
6165  *  @hw: pointer to the HW structure
6166  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
6167  *	0 corresponds to 128ns, each value over 0 doubles the duration.
6168  *  @min_time: Minimum Tx idle period allowed - valid values are 0 to 4.
6169  *	0 corresponds to 128ns, each value over 0 doubles the duration.
6170  *
6171  *  Configure the K0s power state based on the provided parameters.
6172  *  Assumes semaphore already acquired.
6173  *
6174  *  Success returns 0, Failure returns:
6175  *	-E1000_ERR_PHY (-2) in case of access error
6176  *	-E1000_ERR_PARAM (-4) in case of a parameter error
6177  **/
6178 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
6179 {
6180 	s32 ret_val;
6181 	u16 kmrn_reg = 0;
6182 
6183 	DEBUGFUNC("e1000_configure_k0s_lpt");
6184 
6185 	if (entry_latency > 3 || min_time > 4)
6186 		return -E1000_ERR_PARAM;
6187 
6188 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6189 					     &kmrn_reg);
6190 	if (ret_val)
6191 		return ret_val;
6192 
6193 	/* For now don't touch the entry latency, only the minimum time */
6194 	kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
6195 	kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
6196 
6197 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6198 					      kmrn_reg);
6199 	if (ret_val)
6200 		return ret_val;
6201 
6202 	return E1000_SUCCESS;
6203 }
6204
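
/* Hypothetical usage sketch for e1000_configure_k0s_lpt(): each encoded
 * value doubles 128ns, so min_time = 3 requests a 1024ns minimum Tx idle
 * period; entry_latency is validated but, per the comment above, left
 * untouched for now.  Assumes the semaphore is already held.
 */
#ifdef E1000_EXAMPLE_SNIPPETS
static s32 example_configure_k0s(struct e1000_hw *hw)
{
	return e1000_configure_k0s_lpt(hw, 2, 3);
}
#endif /* E1000_EXAMPLE_SNIPPETS */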