/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 */

#include "e1000_api.h"

static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					    u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					   u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					  u32 offset, u8 *data);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 *data);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					  u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						 u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 0:7 GbE region Read Access */
		u32 grwa:8; /* 8:15 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u32 regval; /* full 32-bit register; struct above is 32 bits wide */
};

/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val;
	u16 retry_count;

	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			return TRUE;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		return TRUE;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	hw->phy.ops.release(hw);
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	if (!ret_val)
		ret_val = e1000_get_phy_id(hw);
	hw->phy.ops.acquire(hw);

	return !ret_val;
}

/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;
	u16 phy_reg;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw)) {
			if (hw->mac.type == e1000_pch_lpt) {
				/* Unforce SMBus mode in PHY */
				hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL,
							    &phy_reg);
				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL,
							     phy_reg);

				/* Unforce SMBus mode in MAC */
				mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
				mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
				E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
			}
			break;
		}

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			break;
		}

		DEBUGOUT("Toggling LANPHYPC\n");

		/* Set Phy Config Counter to 50msec */
		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

		if (hw->mac.type == e1000_pch_lpt) {
			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
			 * so ensure that the MAC is also out of SMBus mode.
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		}

		/* Toggle LANPHYPC Value bit */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL);
		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
		E1000_WRITE_FLUSH(hw);
		usec_delay(10);
		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
		E1000_WRITE_FLUSH(hw);
		if (hw->mac.type < e1000_pch_lpt) {
			msec_delay(50);
		} else {
			u16 count = 20;
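			/* Poll up to 20 * 5 msec for LPCD to be asserted */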
			do {
				msec_delay(5);
			} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
				   E1000_CTRL_EXT_LPCD) && count--);
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);

	/* Reset the PHY before any access to it.  Doing so ensures
	 * that the PHY is in a known good state before we read/write
	 * PHY registers.  The generic reset is sufficient here,
	 * because we haven't determined the PHY type yet.
	 */
	ret_val = e1000_phy_hw_reset_generic(hw);

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.set_page	= e1000_set_page_igp;
	phy->ops.read_reg	= e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg	= e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg	= e1000_read_phy_reg_igp;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg	= e1000_write_phy_reg_igp;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg  = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		DEBUGOUT("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;
	}

	nvm->type = e1000_nvm_flash_sw;

	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT;
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
	nvm->ops.release	= e1000_release_nvm_ich8lan;
	nvm->ops.read		= e1000_read_nvm_ich8lan;
	nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write		= e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
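		/* fall-through */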
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
			e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

	return E1000_SUCCESS;
}

/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_emi_reg_locked");

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
						      data);
	else
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       *data);

	return ret_val;
}

/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	DEBUGFUNC("e1000_read_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
}

/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	DEBUGFUNC("e1000_write_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
}

/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 **/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpi_ctrl;

	DEBUGFUNC("e1000_set_eee_pchlan");

	if ((hw->phy.type != e1000_phy_82579) &&
	    (hw->phy.type != e1000_phy_i217))
		return E1000_SUCCESS;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		u16 lpa, pcs_status, data;

		/* Save off link partner's EEE ability */
		switch (hw->phy.type) {
		case e1000_phy_82579:
			lpa = I82579_EEE_LP_ABILITY;
			pcs_status = I82579_EEE_PCS_STATUS;
			break;
		case e1000_phy_i217:
			lpa = I217_EEE_LP_ABILITY;
			pcs_status = I217_EEE_PCS_STATUS;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto release;
		}
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable.
		 */
		if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}

		/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
		ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
		if (ret_val)
			goto release;
	}

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Work around the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	s32 ret_val = E1000_SUCCESS;

	if (link && (E1000_READ_REG(hw, E1000_STATUS) &
		     E1000_STATUS_SPEED_1000)) {
		u16 kmrn_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &kmrn_reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						kmrn_reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						kmrn_reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
	}

	return ret_val;
}

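/**
 *  e1000_ltr2ns - Convert an LTR value to nanoseconds
 *  @ltr: Latency Tolerance Reporting value (10-bit value plus 3-bit scale)
 *
 *  Decodes a PCIe LTR encoding: latency in ns = value * 2^(scale * 5),
 *  per the scale encoding described in e1000_platform_pm_pch_lpt() below.
 **/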
static u64 e1000_ltr2ns(u16 ltr)
{
	u32 value, scale;

	/* Determine the latency in nsec based on the LTR value & scale */
	value = ltr & E1000_LTRV_VALUE_MASK;
	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

	return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
}

/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 *  message to the PMC.
 *
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;		/* latency (ns) */
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
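		/* Time to drain the Rx buffer, less two max-size frames,
		 * at the link rate: bytes * 8 bits * 1000 / Mb/s = nsec.
		 */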
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;

		value = lat_ns;
		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}

		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode. This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}

/**
 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 *  @hw: pointer to the HW structure
 *  @itr: interrupt throttling rate
 *
 *  Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
	u32 svcr;
	s32 timer;

	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;

	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;
	}

	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
}

/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, 82579 parts are excessively
	 * aggressive resulting in many collisions. To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if ((hw->mac.type == e1000_pch2lan) && link) {
		u32 reg;
		reg = E1000_READ_REG(hw, E1000_STATUS);
		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
			reg = E1000_READ_REG(hw, E1000_TIPG);
			reg &= ~E1000_TIPG_IPGT_MASK;
			reg |= 0xFF;
			E1000_WRITE_REG(hw, E1000_TIPG, reg);

			/* Reduce Rx latency in analog PHY */
			ret_val = hw->phy.ops.acquire(hw);
			if (ret_val)
				return ret_val;

			ret_val = e1000_write_emi_reg_locked(hw,
							     I82579_RX_CONFIG,
							     0);

			hw->phy.ops.release(hw);

			if (ret_val)
				return ret_val;
		}
	}

	/* Work-around I218 hang issue */
	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}

	if (hw->mac.type == e1000_pch_lpt) {
		/* Set platform power management values for Latency Tolerance
		 * Reporting (LTR) and Optimized Buffer Flush/Fill (OBFF).
		 */
		ret_val = e1000_platform_pm_pch_lpt(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	if (!link)
		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = FALSE;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-through */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		    E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was a downshift; this must be checked
	 * immediately after link-up.
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	ret_val = e1000_set_eee_pchlan(hw);
	if (ret_val)
		return ret_val;

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");

	return ret_val;
}

/**
 *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific function pointers for PHY, MAC, and NVM.
 **/
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_init_function_pointers_ich8lan");

	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
		break;
	default:
		break;
	}
}

/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_acquire_nvm_ich8lan");

	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return E1000_SUCCESS;
}

/**
 *  e1000_release_nvm_ich8lan - Release NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_ich8lan");

	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return;
}

/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_acquire_swflag_ich8lan");

	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);

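	/* Wait for any prior owner of the software flag to release it */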
	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	timeout = SW_FLAG_TIMEOUT;

	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	if (ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	return ret_val;
}

/**
 *  e1000_release_swflag_ich8lan - Release software control flag
 *  @hw: pointer to the HW structure
 *
 *  Releases the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	DEBUGFUNC("e1000_release_swflag_ich8lan");

	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);

	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
	} else {
		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
	}

	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	return;
}

/**
 *  e1000_check_mng_mode_ich8lan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has any manageability enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_mng_mode_ich8lan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	       ((fwsm & E1000_FWSM_MODE_MASK) ==
		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 *  e1000_check_mng_mode_pchlan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has iAMT enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_mng_mode_pchlan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 **/
static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return;
	}

	if (index < hw->mac.rar_entry_count) {
		s32 ret_val;

		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return;

		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
}

/**
 *  e1000_rar_set_pch_lpt - Set receive address registers
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address register array at index to the address passed
 *  in by addr. For LPT, RAR[0] is the base address register that is to
 *  contain the MAC address. SHRA[0-10] are the shared receive address
 *  registers that are shared between the Host and manageability engine (ME).
 **/
static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	DEBUGFUNC("e1000_rar_set_pch_lpt");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
			    E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

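		/* A wlock_mac of 0 leaves all SHRAR registers writable;
		 * otherwise only SHRA[0..wlock_mac-1] may be written.
		 */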
1540 			s32 ret_val;
1541 
1542 			ret_val = e1000_acquire_swflag_ich8lan(hw);
1543 
1544 			if (ret_val)
1545 				goto out;
1546 
1547 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1548 					rar_low);
1549 			E1000_WRITE_FLUSH(hw);
1550 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1551 					rar_high);
1552 			E1000_WRITE_FLUSH(hw);
1553 
1554 			e1000_release_swflag_ich8lan(hw);
1555 
1556 			/* verify the register updates */
1557 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1558 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1559 				return;
1560 		}
1561 	}
1562 
1563 out:
1564 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1565 }
1566 
1567 /**
1568  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1569  *  @hw: pointer to the HW structure
1570  *  @mc_addr_list: array of multicast addresses to program
1571  *  @mc_addr_count: number of multicast addresses to program
1572  *
1573  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1574  *  The caller must have a packed mc_addr_list of multicast addresses.
1575  **/
1576 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1577 					      u8 *mc_addr_list,
1578 					      u32 mc_addr_count)
1579 {
1580 	u16 phy_reg = 0;
1581 	int i;
1582 	s32 ret_val;
1583 
1584 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1585 
1586 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1587 
1588 	ret_val = hw->phy.ops.acquire(hw);
1589 	if (ret_val)
1590 		return;
1591 
1592 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1593 	if (ret_val)
1594 		goto release;
1595 
1596 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1597 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1598 					   (u16)(hw->mac.mta_shadow[i] &
1599 						 0xFFFF));
1600 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1601 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
1602 						 0xFFFF));
1603 	}
1604 
1605 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1606 
1607 release:
1608 	hw->phy.ops.release(hw);
1609 }
1610 
1611 /**
1612  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1613  *  @hw: pointer to the HW structure
1614  *
1615  *  Checks if firmware is blocking the reset of the PHY.
1616  *  This is a function pointer entry point only called by
1617  *  reset routines.
1618  **/
1619 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1620 {
1621 	u32 fwsm;
1622 
1623 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1624 
1625 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1626 
1627 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1628 						: E1000_BLK_PHY_RESET;
1629 }
1630 
1631 /**
1632  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1633  *  @hw: pointer to the HW structure
1634  *
1635  *  Assumes semaphore already acquired.
1636  *
1637  **/
1638 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1639 {
1640 	u16 phy_data;
1641 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1642 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1643 		E1000_STRAP_SMT_FREQ_SHIFT;
1644 	s32 ret_val;
1645 
1646 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1647 
1648 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1649 	if (ret_val)
1650 		return ret_val;
1651 
1652 	phy_data &= ~HV_SMB_ADDR_MASK;
1653 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1654 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1655 
1656 	if (hw->phy.type == e1000_phy_i217) {
1657 		/* Restore SMBus frequency */
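		/* A strap-encoded frequency of zero means no valid
		 * setting; otherwise the two-bit frequency code is the
		 * strap value minus one, which the post-decrement
		 * below both tests for and produces.
		 */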
1658 		if (freq--) {
1659 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1660 			phy_data |= (freq & (1 << 0)) <<
1661 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
1662 			phy_data |= (freq & (1 << 1)) <<
1663 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1664 		} else {
1665 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
1666 		}
1667 	}
1668 
1669 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1670 }
1671 
1672 /**
1673  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1674  *  @hw:   pointer to the HW structure
1675  *
1676  *  SW should configure the LCD from the NVM extended configuration region
1677  *  as a workaround for certain parts.
1678  **/
1679 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1680 {
1681 	struct e1000_phy_info *phy = &hw->phy;
1682 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1683 	s32 ret_val = E1000_SUCCESS;
1684 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1685 
1686 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1687 
1688 	/* Initialize the PHY from the NVM on ICH platforms.  This
1689 	 * is needed due to an issue where the NVM configuration is
1690 	 * not properly autoloaded after power transitions.
1691 	 * Therefore, after each PHY reset, we will load the
1692 	 * configuration data out of the NVM manually.
1693 	 */
1694 	switch (hw->mac.type) {
1695 	case e1000_ich8lan:
1696 		if (phy->type != e1000_phy_igp_3)
1697 			return ret_val;
1698 
1699 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1700 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1701 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1702 			break;
1703 		}
1704 		/* Fall-thru */
1705 	case e1000_pchlan:
1706 	case e1000_pch2lan:
1707 	case e1000_pch_lpt:
1708 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1709 		break;
1710 	default:
1711 		return ret_val;
1712 	}
1713 
1714 	ret_val = hw->phy.ops.acquire(hw);
1715 	if (ret_val)
1716 		return ret_val;
1717 
1718 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1719 	if (!(data & sw_cfg_mask))
1720 		goto release;
1721 
1722 	/* Make sure HW does not configure LCD from PHY
1723 	 * extended configuration before SW configuration
1724 	 */
1725 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1726 	if ((hw->mac.type < e1000_pch2lan) &&
1727 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1728 		goto release;
1729 
1730 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1731 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1732 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1733 	if (!cnf_size)
1734 		goto release;
1735 
1736 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1737 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1738 
1739 	if (((hw->mac.type == e1000_pchlan) &&
1740 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1741 	    (hw->mac.type > e1000_pchlan)) {
1742 		/* HW configures the SMBus address and LEDs when the
1743 		 * OEM and LCD Write Enable bits are set in the NVM.
1744 		 * When both NVM bits are cleared, SW will configure
1745 		 * them instead.
1746 		 */
1747 		ret_val = e1000_write_smbus_addr(hw);
1748 		if (ret_val)
1749 			goto release;
1750 
1751 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1752 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1753 							(u16)data);
1754 		if (ret_val)
1755 			goto release;
1756 	}
1757 
1758 	/* Configure LCD from extended configuration region. */
1759 
1760 	/* cnf_base_addr is in DWORD */
1761 	word_addr = (u16)(cnf_base_addr << 1);
1762 
1763 	for (i = 0; i < cnf_size; i++) {
1764 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1765 					   &reg_data);
1766 		if (ret_val)
1767 			goto release;
1768 
1769 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1770 					   1, &reg_addr);
1771 		if (ret_val)
1772 			goto release;
1773 
1774 		/* Save off the PHY page for future writes. */
1775 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1776 			phy_page = reg_data;
1777 			continue;
1778 		}
1779 
1780 		reg_addr &= PHY_REG_MASK;
1781 		reg_addr |= phy_page;
1782 
1783 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1784 						    reg_data);
1785 		if (ret_val)
1786 			goto release;
1787 	}
1788 
1789 release:
1790 	hw->phy.ops.release(hw);
1791 	return ret_val;
1792 }
1793 
1794 /**
1795  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1796  *  @hw:   pointer to the HW structure
1797  *  @link: link up bool flag
1798  *
1799  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1800  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1801  *  If link is down, the function will restore the default K1 setting located
1802  *  in the NVM.
1803  **/
1804 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1805 {
1806 	s32 ret_val = E1000_SUCCESS;
1807 	u16 status_reg = 0;
1808 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1809 
1810 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1811 
1812 	if (hw->mac.type != e1000_pchlan)
1813 		return E1000_SUCCESS;
1814 
1815 	/* Wrap the whole flow with the sw flag */
1816 	ret_val = hw->phy.ops.acquire(hw);
1817 	if (ret_val)
1818 		return ret_val;
1819 
1820 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1821 	if (link) {
1822 		if (hw->phy.type == e1000_phy_82578) {
1823 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1824 							      &status_reg);
1825 			if (ret_val)
1826 				goto release;
1827 
1828 			status_reg &= BM_CS_STATUS_LINK_UP |
1829 				      BM_CS_STATUS_RESOLVED |
1830 				      BM_CS_STATUS_SPEED_MASK;
1831 
1832 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1833 					   BM_CS_STATUS_RESOLVED |
1834 					   BM_CS_STATUS_SPEED_1000))
1835 				k1_enable = FALSE;
1836 		}
1837 
1838 		if (hw->phy.type == e1000_phy_82577) {
1839 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1840 							      &status_reg);
1841 			if (ret_val)
1842 				goto release;
1843 
1844 			status_reg &= HV_M_STATUS_LINK_UP |
1845 				      HV_M_STATUS_AUTONEG_COMPLETE |
1846 				      HV_M_STATUS_SPEED_MASK;
1847 
1848 			if (status_reg == (HV_M_STATUS_LINK_UP |
1849 					   HV_M_STATUS_AUTONEG_COMPLETE |
1850 					   HV_M_STATUS_SPEED_1000))
1851 				k1_enable = FALSE;
1852 		}
1853 
1854 		/* Link stall fix for link up */
1855 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1856 						       0x0100);
1857 		if (ret_val)
1858 			goto release;
1859 
1860 	} else {
1861 		/* Link stall fix for link down */
1862 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1863 						       0x4100);
1864 		if (ret_val)
1865 			goto release;
1866 	}
1867 
1868 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1869 
1870 release:
1871 	hw->phy.ops.release(hw);
1872 
1873 	return ret_val;
1874 }
1875 
1876 /**
1877  *  e1000_configure_k1_ich8lan - Configure K1 power state
1878  *  @hw: pointer to the HW structure
1879  *  @k1_enable: K1 state to configure
1880  *
1881  *  Configure the K1 power state based on the provided parameter.
1882  *  Assumes semaphore already acquired.
1883  *
1884  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1885  **/
1886 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1887 {
1888 	s32 ret_val;
1889 	u32 ctrl_reg = 0;
1890 	u32 ctrl_ext = 0;
1891 	u32 reg = 0;
1892 	u16 kmrn_reg = 0;
1893 
1894 	DEBUGFUNC("e1000_configure_k1_ich8lan");
1895 
1896 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1897 					     &kmrn_reg);
1898 	if (ret_val)
1899 		return ret_val;
1900 
1901 	if (k1_enable)
1902 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1903 	else
1904 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1905 
1906 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1907 					      kmrn_reg);
1908 	if (ret_val)
1909 		return ret_val;
1910 
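	/* Briefly force the MAC speed with the speed-select bypass set
	 * so the new K1 setting can take effect, then restore the
	 * original CTRL and CTRL_EXT values.
	 */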
1911 	usec_delay(20);
1912 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1913 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1914 
1915 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1916 	reg |= E1000_CTRL_FRCSPD;
1917 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1918 
1919 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1920 	E1000_WRITE_FLUSH(hw);
1921 	usec_delay(20);
1922 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1923 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1924 	E1000_WRITE_FLUSH(hw);
1925 	usec_delay(20);
1926 
1927 	return E1000_SUCCESS;
1928 }
1929 
1930 /**
1931  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1932  *  @hw:       pointer to the HW structure
1933  *  @d0_state: boolean indicating whether the device is entering D0 or D3
1934  *
1935  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1936  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1937  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
1938  **/
1939 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1940 {
1941 	s32 ret_val = 0;
1942 	u32 mac_reg;
1943 	u16 oem_reg;
1944 
1945 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1946 
1947 	if (hw->mac.type < e1000_pchlan)
1948 		return ret_val;
1949 
1950 	ret_val = hw->phy.ops.acquire(hw);
1951 	if (ret_val)
1952 		return ret_val;
1953 
1954 	if (hw->mac.type == e1000_pchlan) {
1955 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1956 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1957 			goto release;
1958 	}
1959 
1960 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1961 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1962 		goto release;
1963 
1964 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1965 
1966 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1967 	if (ret_val)
1968 		goto release;
1969 
1970 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1971 
1972 	if (d0_state) {
1973 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1974 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1975 
1976 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1977 			oem_reg |= HV_OEM_BITS_LPLU;
1978 	} else {
1979 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1980 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1981 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1982 
1983 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1984 		    E1000_PHY_CTRL_NOND0A_LPLU))
1985 			oem_reg |= HV_OEM_BITS_LPLU;
1986 	}
1987 
1988 	/* Set Restart auto-neg to activate the bits */
1989 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1990 	    !hw->phy.ops.check_reset_block(hw))
1991 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1992 
1993 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1994 
1995 release:
1996 	hw->phy.ops.release(hw);
1997 
1998 	return ret_val;
1999 }
2000 
2001 
2002 /**
2003  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2004  *  @hw:   pointer to the HW structure
2005  **/
2006 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2007 {
2008 	s32 ret_val;
2009 	u16 data;
2010 
2011 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2012 
2013 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2014 	if (ret_val)
2015 		return ret_val;
2016 
2017 	data |= HV_KMRN_MDIO_SLOW;
2018 
2019 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2020 
2021 	return ret_val;
2022 }
2023 
2024 /**
2025  *  e1000_hv_phy_workarounds_ich8lan - PHY workarounds done after every PHY reset
2026  *  @hw: pointer to the HW structure
2027  **/
2028 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2029 {
2030 	s32 ret_val = E1000_SUCCESS;
2031 	u16 phy_data;
2032 
2033 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2034 
2035 	if (hw->mac.type != e1000_pchlan)
2036 		return E1000_SUCCESS;
2037 
2038 	/* Set MDIO slow mode before any other MDIO access */
2039 	if (hw->phy.type == e1000_phy_82577) {
2040 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2041 		if (ret_val)
2042 			return ret_val;
2043 	}
2044 
2045 	if (((hw->phy.type == e1000_phy_82577) &&
2046 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2047 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2048 		/* Disable generation of early preamble */
2049 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2050 		if (ret_val)
2051 			return ret_val;
2052 
2053 		/* Preamble tuning for SSC */
2054 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2055 						0xA204);
2056 		if (ret_val)
2057 			return ret_val;
2058 	}
2059 
2060 	if (hw->phy.type == e1000_phy_82578) {
2061 		/* Return registers to default by doing a soft reset then
2062 		 * writing 0x3140 to the control register.
2063 		 */
2064 		if (hw->phy.revision < 2) {
2065 			e1000_phy_sw_reset_generic(hw);
2066 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2067 							0x3140);
2068 		}
2069 	}
2070 
2071 	/* Select page 0 */
2072 	ret_val = hw->phy.ops.acquire(hw);
2073 	if (ret_val)
2074 		return ret_val;
2075 
2076 	hw->phy.addr = 1;
2077 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2078 	hw->phy.ops.release(hw);
2079 	if (ret_val)
2080 		return ret_val;
2081 
2082 	/* Configure the K1 Si workaround during phy reset assuming there is
2083 	 * link so that it disables K1 if link is at 1Gbps.
2084 	 */
2085 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2086 	if (ret_val)
2087 		return ret_val;
2088 
2089 	/* Workaround for link disconnects on a busy hub in half duplex */
2090 	ret_val = hw->phy.ops.acquire(hw);
2091 	if (ret_val)
2092 		return ret_val;
2093 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2094 	if (ret_val)
2095 		goto release;
2096 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2097 					       phy_data & 0x00FF);
2098 	if (ret_val)
2099 		goto release;
2100 
2101 	/* set MSE higher to enable link to stay up when noise is high */
2102 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2103 release:
2104 	hw->phy.ops.release(hw);
2105 
2106 	return ret_val;
2107 }
2108 
2109 /**
2110  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2111  *  @hw:   pointer to the HW structure
2112  **/
2113 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2114 {
2115 	u32 mac_reg;
2116 	u16 i, phy_reg = 0;
2117 	s32 ret_val;
2118 
2119 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2120 
2121 	ret_val = hw->phy.ops.acquire(hw);
2122 	if (ret_val)
2123 		return;
2124 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2125 	if (ret_val)
2126 		goto release;
2127 
2128 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
2129 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
2130 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2131 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2132 					   (u16)(mac_reg & 0xFFFF));
2133 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2134 					   (u16)((mac_reg >> 16) & 0xFFFF));
2135 
2136 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2137 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2138 					   (u16)(mac_reg & 0xFFFF));
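		/* The Address Valid bit (RAH bit 31) becomes bit 15 of
		 * the PHY RAR control word after the shift below.
		 */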
2139 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2140 					   (u16)((mac_reg & E1000_RAH_AV)
2141 						 >> 16));
2142 	}
2143 
2144 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2145 
2146 release:
2147 	hw->phy.ops.release(hw);
2148 }
2149 
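/**
 *  e1000_calc_rx_da_crc - Calculate CRC32 of a receive destination address
 *  @mac: six-byte destination (MAC) address
 *
 *  Computes the bit-reflected CRC32 (802.3 polynomial 0xEDB88320) of the
 *  address; callers program the result into the PCH_RAICC registers.
 **/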
2150 static u32 e1000_calc_rx_da_crc(u8 mac[])
2151 {
2152 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2153 	u32 i, j, mask, crc;
2154 
2155 	DEBUGFUNC("e1000_calc_rx_da_crc");
2156 
2157 	crc = 0xffffffff;
2158 	for (i = 0; i < 6; i++) {
2159 		crc = crc ^ mac[i];
2160 		for (j = 8; j > 0; j--) {
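			/* (crc & 1) * (-1) is an all-ones mask when the
			 * low bit is set, so the polynomial is XORed in
			 * without a branch.
			 */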
2161 			mask = (crc & 1) * (-1);
2162 			crc = (crc >> 1) ^ (poly & mask);
2163 		}
2164 	}
2165 	return ~crc;
2166 }
2167 
2168 /**
2169  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2170  *  with 82579 PHY
2171  *  @hw: pointer to the HW structure
2172  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2173  **/
2174 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2175 {
2176 	s32 ret_val = E1000_SUCCESS;
2177 	u16 phy_reg, data;
2178 	u32 mac_reg;
2179 	u16 i;
2180 
2181 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2182 
2183 	if (hw->mac.type < e1000_pch2lan)
2184 		return E1000_SUCCESS;
2185 
2186 	/* disable Rx path while enabling/disabling workaround */
2187 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2188 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2189 					phy_reg | (1 << 14));
2190 	if (ret_val)
2191 		return ret_val;
2192 
2193 	if (enable) {
2194 		/* Write Rx addresses (rar_entry_count for RAL/H, +4 for
2195 		 * SHRAL/H) and initial CRC values to the MAC
2196 		 */
2197 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
2198 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2199 			u32 addr_high, addr_low;
2200 
2201 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2202 			if (!(addr_high & E1000_RAH_AV))
2203 				continue;
2204 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2205 			mac_addr[0] = (addr_low & 0xFF);
2206 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2207 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2208 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2209 			mac_addr[4] = (addr_high & 0xFF);
2210 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2211 
2212 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2213 					e1000_calc_rx_da_crc(mac_addr));
2214 		}
2215 
2216 		/* Write Rx addresses to the PHY */
2217 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2218 
2219 		/* Enable jumbo frame workaround in the MAC */
2220 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2221 		mac_reg &= ~(1 << 14);
2222 		mac_reg |= (7 << 15);
2223 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2224 
2225 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2226 		mac_reg |= E1000_RCTL_SECRC;
2227 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2228 
2229 		ret_val = e1000_read_kmrn_reg_generic(hw,
2230 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2231 						&data);
2232 		if (ret_val)
2233 			return ret_val;
2234 		ret_val = e1000_write_kmrn_reg_generic(hw,
2235 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2236 						data | (1 << 0));
2237 		if (ret_val)
2238 			return ret_val;
2239 		ret_val = e1000_read_kmrn_reg_generic(hw,
2240 						E1000_KMRNCTRLSTA_HD_CTRL,
2241 						&data);
2242 		if (ret_val)
2243 			return ret_val;
2244 		data &= ~(0xF << 8);
2245 		data |= (0xB << 8);
2246 		ret_val = e1000_write_kmrn_reg_generic(hw,
2247 						E1000_KMRNCTRLSTA_HD_CTRL,
2248 						data);
2249 		if (ret_val)
2250 			return ret_val;
2251 
2252 		/* Enable jumbo frame workaround in the PHY */
2253 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2254 		data &= ~(0x7F << 5);
2255 		data |= (0x37 << 5);
2256 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2257 		if (ret_val)
2258 			return ret_val;
2259 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2260 		data &= ~(1 << 13);
2261 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2262 		if (ret_val)
2263 			return ret_val;
2264 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2265 		data &= ~(0x3FF << 2);
2266 		data |= (0x1A << 2);
2267 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2268 		if (ret_val)
2269 			return ret_val;
2270 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2271 		if (ret_val)
2272 			return ret_val;
2273 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2274 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2275 						(1 << 10));
2276 		if (ret_val)
2277 			return ret_val;
2278 	} else {
2279 		/* Write MAC register values back to h/w defaults */
2280 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2281 		mac_reg &= ~(0xF << 14);
2282 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2283 
2284 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2285 		mac_reg &= ~E1000_RCTL_SECRC;
2286 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2287 
2288 		ret_val = e1000_read_kmrn_reg_generic(hw,
2289 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2290 						&data);
2291 		if (ret_val)
2292 			return ret_val;
2293 		ret_val = e1000_write_kmrn_reg_generic(hw,
2294 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2295 						data & ~(1 << 0));
2296 		if (ret_val)
2297 			return ret_val;
2298 		ret_val = e1000_read_kmrn_reg_generic(hw,
2299 						E1000_KMRNCTRLSTA_HD_CTRL,
2300 						&data);
2301 		if (ret_val)
2302 			return ret_val;
2303 		data &= ~(0xF << 8);
2304 		data |= (0xB << 8);
2305 		ret_val = e1000_write_kmrn_reg_generic(hw,
2306 						E1000_KMRNCTRLSTA_HD_CTRL,
2307 						data);
2308 		if (ret_val)
2309 			return ret_val;
2310 
2311 		/* Write PHY register values back to h/w defaults */
2312 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2313 		data &= ~(0x7F << 5);
2314 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2315 		if (ret_val)
2316 			return ret_val;
2317 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2318 		data |= (1 << 13);
2319 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2320 		if (ret_val)
2321 			return ret_val;
2322 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2323 		data &= ~(0x3FF << 2);
2324 		data |= (0x8 << 2);
2325 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2326 		if (ret_val)
2327 			return ret_val;
2328 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2329 		if (ret_val)
2330 			return ret_val;
2331 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2332 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2333 						~(1 << 10));
2334 		if (ret_val)
2335 			return ret_val;
2336 	}
2337 
2338 	/* re-enable Rx path after enabling/disabling workaround */
2339 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2340 				     ~(1 << 14));
2341 }
2342 
2343 /**
2344  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds done after every PHY reset
2345  *  @hw: pointer to the HW structure
2346  **/
2347 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2348 {
2349 	s32 ret_val = E1000_SUCCESS;
2350 
2351 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2352 
2353 	if (hw->mac.type != e1000_pch2lan)
2354 		return E1000_SUCCESS;
2355 
2356 	/* Set MDIO slow mode before any other MDIO access */
2357 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2358 	if (ret_val)
2359 		return ret_val;
2360 
2361 	ret_val = hw->phy.ops.acquire(hw);
2362 	if (ret_val)
2363 		return ret_val;
2364 	/* set MSE higher to enable link to stay up when noise is high */
2365 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2366 	if (ret_val)
2367 		goto release;
2368 	/* drop the link after the MSE threshold has been reached 5 times */
2369 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2370 release:
2371 	hw->phy.ops.release(hw);
2372 
2373 	return ret_val;
2374 }
2375 
2376 /**
2377  *  e1000_k1_workaround_lv - K1 Si workaround
2378  *  @hw:   pointer to the HW structure
2379  *
2380  *  Workaround to set the K1 beacon duration for 82579 parts
2381  **/
2382 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2383 {
2384 	s32 ret_val = E1000_SUCCESS;
2385 	u16 status_reg = 0;
2386 	u32 mac_reg;
2387 	u16 phy_reg;
2388 
2389 	DEBUGFUNC("e1000_k1_workaround_lv");
2390 
2391 	if (hw->mac.type != e1000_pch2lan)
2392 		return E1000_SUCCESS;
2393 
2394 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
2395 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2396 	if (ret_val)
2397 		return ret_val;
2398 
2399 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2400 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2401 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2402 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2403 
2404 		ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
2405 		if (ret_val)
2406 			return ret_val;
2407 
2408 		if (status_reg & HV_M_STATUS_SPEED_1000) {
2409 			u16 pm_phy_reg;
2410 
2411 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2412 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2413 			/* LV 1Gbps packet drop issue workaround */
2414 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2415 						       &pm_phy_reg);
2416 			if (ret_val)
2417 				return ret_val;
2418 			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2419 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2420 							pm_phy_reg);
2421 			if (ret_val)
2422 				return ret_val;
2423 		} else {
2424 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2425 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2426 		}
2427 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2428 		ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2429 	}
2430 
2431 	return ret_val;
2432 }
2433 
2434 /**
2435  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2436  *  @hw:   pointer to the HW structure
2437  *  @gate: boolean set to TRUE to gate, FALSE to ungate
2438  *
2439  *  Gate/ungate the automatic PHY configuration via hardware; perform
2440  *  the configuration via software instead.
2441  **/
2442 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2443 {
2444 	u32 extcnf_ctrl;
2445 
2446 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2447 
2448 	if (hw->mac.type < e1000_pch2lan)
2449 		return;
2450 
2451 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2452 
2453 	if (gate)
2454 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2455 	else
2456 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2457 
2458 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2459 }
2460 
2461 /**
2462  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2463  *  @hw: pointer to the HW structure
2464  *
2465  *  Check the appropriate indication that the MAC has finished configuring
2466  *  the PHY after a software reset.
2467  **/
2468 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2469 {
2470 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2471 
2472 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
2473 
2474 	/* Wait for basic configuration to complete before proceeding */
2475 	do {
2476 		data = E1000_READ_REG(hw, E1000_STATUS);
2477 		data &= E1000_STATUS_LAN_INIT_DONE;
2478 		usec_delay(100);
2479 	} while ((!data) && --loop);
2480 
2481 	/* If basic configuration is incomplete before the above loop
2482 	 * count reaches 0, loading the configuration from NVM will
2483 	 * leave the PHY in a bad state possibly resulting in no link.
2484 	 */
2485 	if (loop == 0)
2486 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2487 
2488 	/* Clear the Init Done bit for the next init event */
2489 	data = E1000_READ_REG(hw, E1000_STATUS);
2490 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2491 	E1000_WRITE_REG(hw, E1000_STATUS, data);
2492 }
2493 
2494 /**
2495  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2496  *  @hw: pointer to the HW structure
2497  **/
2498 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2499 {
2500 	s32 ret_val = E1000_SUCCESS;
2501 	u16 reg;
2502 
2503 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2504 
2505 	if (hw->phy.ops.check_reset_block(hw))
2506 		return E1000_SUCCESS;
2507 
2508 	/* Allow time for h/w to get to quiescent state after reset */
2509 	msec_delay(10);
2510 
2511 	/* Perform any necessary post-reset workarounds */
2512 	switch (hw->mac.type) {
2513 	case e1000_pchlan:
2514 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2515 		if (ret_val)
2516 			return ret_val;
2517 		break;
2518 	case e1000_pch2lan:
2519 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2520 		if (ret_val)
2521 			return ret_val;
2522 		break;
2523 	default:
2524 		break;
2525 	}
2526 
2527 	/* Clear the host wakeup bit after lcd reset */
2528 	if (hw->mac.type >= e1000_pchlan) {
2529 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2530 		reg &= ~BM_WUC_HOST_WU_BIT;
2531 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2532 	}
2533 
2534 	/* Configure the LCD with the extended configuration region in NVM */
2535 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2536 	if (ret_val)
2537 		return ret_val;
2538 
2539 	/* Configure the LCD with the OEM bits in NVM */
2540 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
2541 
2542 	if (hw->mac.type == e1000_pch2lan) {
2543 		/* Ungate automatic PHY configuration on non-managed 82579 */
2544 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
2545 		    E1000_ICH_FWSM_FW_VALID)) {
2546 			msec_delay(10);
2547 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
2548 		}
2549 
2550 		/* Set EEE LPI Update Timer to 200usec */
2551 		ret_val = hw->phy.ops.acquire(hw);
2552 		if (ret_val)
2553 			return ret_val;
2554 		ret_val = e1000_write_emi_reg_locked(hw,
2555 						     I82579_LPI_UPDATE_TIMER,
2556 						     0x1387);
2557 		hw->phy.ops.release(hw);
2558 	}
2559 
2560 	return ret_val;
2561 }
2562 
2563 /**
2564  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2565  *  @hw: pointer to the HW structure
2566  *
2567  *  Resets the PHY
2568  *  This is a function pointer entry point called by drivers
2569  *  or other shared routines.
2570  **/
2571 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2572 {
2573 	s32 ret_val = E1000_SUCCESS;
2574 
2575 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2576 
2577 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2578 	if ((hw->mac.type == e1000_pch2lan) &&
2579 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2580 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
2581 
2582 	ret_val = e1000_phy_hw_reset_generic(hw);
2583 	if (ret_val)
2584 		return ret_val;
2585 
2586 	return e1000_post_phy_reset_ich8lan(hw);
2587 }
2588 
2589 /**
2590  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2591  *  @hw: pointer to the HW structure
2592  *  @active: TRUE to enable LPLU, FALSE to disable
2593  *
2594  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
2595  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
2596  *  not set the PHY speed. This function will manually set the LPLU bit and
2597  *  restart auto-neg as hw would do. D3 and D0 LPLU will call the same
2598  *  function since it configures the same bit.
2599  **/
2600 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2601 {
2602 	s32 ret_val;
2603 	u16 oem_reg;
2604 
2605 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
2606 
2607 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2608 	if (ret_val)
2609 		return ret_val;
2610 
2611 	if (active)
2612 		oem_reg |= HV_OEM_BITS_LPLU;
2613 	else
2614 		oem_reg &= ~HV_OEM_BITS_LPLU;
2615 
2616 	if (!hw->phy.ops.check_reset_block(hw))
2617 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2618 
2619 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2620 }
2621 
2622 /**
2623  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2624  *  @hw: pointer to the HW structure
2625  *  @active: TRUE to enable LPLU, FALSE to disable
2626  *
2627  *  Sets the LPLU D0 state according to the active flag.  When
2628  *  activating LPLU this function also disables smart speed
2629  *  and vice versa.  LPLU will not be activated unless the
2630  *  device's autonegotiation advertisement is 10, 10/100, or
2631  *  10/100/1000 at all duplexes.
2632  *  This is a function pointer entry point only called by
2633  *  PHY setup routines.
2634  **/
2635 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2636 {
2637 	struct e1000_phy_info *phy = &hw->phy;
2638 	u32 phy_ctrl;
2639 	s32 ret_val = E1000_SUCCESS;
2640 	u16 data;
2641 
2642 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2643 
2644 	if (phy->type == e1000_phy_ife)
2645 		return E1000_SUCCESS;
2646 
2647 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2648 
2649 	if (active) {
2650 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2651 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2652 
2653 		if (phy->type != e1000_phy_igp_3)
2654 			return E1000_SUCCESS;
2655 
2656 		/* Call gig speed drop workaround on LPLU before accessing
2657 		 * any PHY registers
2658 		 */
2659 		if (hw->mac.type == e1000_ich8lan)
2660 			e1000_gig_downshift_workaround_ich8lan(hw);
2661 
2662 		/* When LPLU is enabled, we should disable SmartSpeed */
2663 		ret_val = phy->ops.read_reg(hw,
2664 					    IGP01E1000_PHY_PORT_CONFIG,
2665 					    &data);
2666 		if (ret_val)
2667 			return ret_val;
2668 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2669 		ret_val = phy->ops.write_reg(hw,
2670 					     IGP01E1000_PHY_PORT_CONFIG,
2671 					     data);
2672 		if (ret_val)
2673 			return ret_val;
2674 	} else {
2675 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2676 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2677 
2678 		if (phy->type != e1000_phy_igp_3)
2679 			return E1000_SUCCESS;
2680 
2681 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2682 		 * during Dx states where the power conservation is most
2683 		 * important.  During driver activity we should enable
2684 		 * SmartSpeed, so performance is maintained.
2685 		 */
2686 		if (phy->smart_speed == e1000_smart_speed_on) {
2687 			ret_val = phy->ops.read_reg(hw,
2688 						    IGP01E1000_PHY_PORT_CONFIG,
2689 						    &data);
2690 			if (ret_val)
2691 				return ret_val;
2692 
2693 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2694 			ret_val = phy->ops.write_reg(hw,
2695 						     IGP01E1000_PHY_PORT_CONFIG,
2696 						     data);
2697 			if (ret_val)
2698 				return ret_val;
2699 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2700 			ret_val = phy->ops.read_reg(hw,
2701 						    IGP01E1000_PHY_PORT_CONFIG,
2702 						    &data);
2703 			if (ret_val)
2704 				return ret_val;
2705 
2706 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2707 			ret_val = phy->ops.write_reg(hw,
2708 						     IGP01E1000_PHY_PORT_CONFIG,
2709 						     data);
2710 			if (ret_val)
2711 				return ret_val;
2712 		}
2713 	}
2714 
2715 	return E1000_SUCCESS;
2716 }
2717 
2718 /**
2719  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2720  *  @hw: pointer to the HW structure
2721  *  @active: TRUE to enable LPLU, FALSE to disable
2722  *
2723  *  Sets the LPLU D3 state according to the active flag.  When
2724  *  activating LPLU this function also disables smart speed
2725  *  and vice versa.  LPLU will not be activated unless the
2726  *  device's autonegotiation advertisement is 10, 10/100, or
2727  *  10/100/1000 at all duplexes.
2728  *  This is a function pointer entry point only called by
2729  *  PHY setup routines.
2730  **/
2731 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2732 {
2733 	struct e1000_phy_info *phy = &hw->phy;
2734 	u32 phy_ctrl;
2735 	s32 ret_val = E1000_SUCCESS;
2736 	u16 data;
2737 
2738 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2739 
2740 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2741 
2742 	if (!active) {
2743 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2744 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2745 
2746 		if (phy->type != e1000_phy_igp_3)
2747 			return E1000_SUCCESS;
2748 
2749 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2750 		 * during Dx states where the power conservation is most
2751 		 * important.  During driver activity we should enable
2752 		 * SmartSpeed, so performance is maintained.
2753 		 */
2754 		if (phy->smart_speed == e1000_smart_speed_on) {
2755 			ret_val = phy->ops.read_reg(hw,
2756 						    IGP01E1000_PHY_PORT_CONFIG,
2757 						    &data);
2758 			if (ret_val)
2759 				return ret_val;
2760 
2761 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2762 			ret_val = phy->ops.write_reg(hw,
2763 						     IGP01E1000_PHY_PORT_CONFIG,
2764 						     data);
2765 			if (ret_val)
2766 				return ret_val;
2767 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2768 			ret_val = phy->ops.read_reg(hw,
2769 						    IGP01E1000_PHY_PORT_CONFIG,
2770 						    &data);
2771 			if (ret_val)
2772 				return ret_val;
2773 
2774 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2775 			ret_val = phy->ops.write_reg(hw,
2776 						     IGP01E1000_PHY_PORT_CONFIG,
2777 						     data);
2778 			if (ret_val)
2779 				return ret_val;
2780 		}
2781 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2782 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2783 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2784 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2785 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2786 
2787 		if (phy->type != e1000_phy_igp_3)
2788 			return E1000_SUCCESS;
2789 
2790 		/* Call gig speed drop workaround on LPLU before accessing
2791 		 * any PHY registers
2792 		 */
2793 		if (hw->mac.type == e1000_ich8lan)
2794 			e1000_gig_downshift_workaround_ich8lan(hw);
2795 
2796 		/* When LPLU is enabled, we should disable SmartSpeed */
2797 		ret_val = phy->ops.read_reg(hw,
2798 					    IGP01E1000_PHY_PORT_CONFIG,
2799 					    &data);
2800 		if (ret_val)
2801 			return ret_val;
2802 
2803 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2804 		ret_val = phy->ops.write_reg(hw,
2805 					     IGP01E1000_PHY_PORT_CONFIG,
2806 					     data);
2807 	}
2808 
2809 	return ret_val;
2810 }
2811 
2812 /**
2813  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2814  *  @hw: pointer to the HW structure
2815  *  @bank:  pointer to the variable that returns the active bank
2816  *
2817  *  Reads signature byte from the NVM using the flash access registers.
2818  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2819  **/
2820 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2821 {
2822 	u32 eecd;
2823 	struct e1000_nvm_info *nvm = &hw->nvm;
2824 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
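	/* byte offset of the signature word's high byte */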
2825 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2826 	u8 sig_byte = 0;
2827 	s32 ret_val;
2828 
2829 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2830 
2831 	switch (hw->mac.type) {
2832 	case e1000_ich8lan:
2833 	case e1000_ich9lan:
2834 		eecd = E1000_READ_REG(hw, E1000_EECD);
2835 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2836 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2837 			if (eecd & E1000_EECD_SEC1VAL)
2838 				*bank = 1;
2839 			else
2840 				*bank = 0;
2841 
2842 			return E1000_SUCCESS;
2843 		}
2844 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2845 		/* fall-thru */
2846 	default:
2847 		/* set bank to 0 in case flash read fails */
2848 		*bank = 0;
2849 
2850 		/* Check bank 0 */
2851 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2852 							&sig_byte);
2853 		if (ret_val)
2854 			return ret_val;
2855 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2856 		    E1000_ICH_NVM_SIG_VALUE) {
2857 			*bank = 0;
2858 			return E1000_SUCCESS;
2859 		}
2860 
2861 		/* Check bank 1 */
2862 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2863 							bank1_offset,
2864 							&sig_byte);
2865 		if (ret_val)
2866 			return ret_val;
2867 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2868 		    E1000_ICH_NVM_SIG_VALUE) {
2869 			*bank = 1;
2870 			return E1000_SUCCESS;
2871 		}
2872 
2873 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2874 		return -E1000_ERR_NVM;
2875 	}
2876 }
2877 
2878 /**
2879  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2880  *  @hw: pointer to the HW structure
2881  *  @offset: The offset (in words) of the word(s) to read.
2882  *  @words: Size of data to read in words
2883  *  @data: Pointer to the word(s) to read at offset.
2884  *
2885  *  Reads a word(s) from the NVM using the flash access registers.
2886  **/
2887 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2888 				  u16 *data)
2889 {
2890 	struct e1000_nvm_info *nvm = &hw->nvm;
2891 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2892 	u32 act_offset;
2893 	s32 ret_val = E1000_SUCCESS;
2894 	u32 bank = 0;
2895 	u16 i, word;
2896 
2897 	DEBUGFUNC("e1000_read_nvm_ich8lan");
2898 
2899 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2900 	    (words == 0)) {
2901 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2902 		ret_val = -E1000_ERR_NVM;
2903 		goto out;
2904 	}
2905 
2906 	nvm->ops.acquire(hw);
2907 
2908 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2909 	if (ret_val != E1000_SUCCESS) {
2910 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2911 		bank = 0;
2912 	}
2913 
2914 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2915 	act_offset += offset;
2916 
2917 	ret_val = E1000_SUCCESS;
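	/* Prefer a pending (not yet committed) value in the shadow RAM
	 * over the contents of the flash.
	 */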
2918 	for (i = 0; i < words; i++) {
2919 		if (dev_spec->shadow_ram[offset+i].modified) {
2920 			data[i] = dev_spec->shadow_ram[offset+i].value;
2921 		} else {
2922 			ret_val = e1000_read_flash_word_ich8lan(hw,
2923 								act_offset + i,
2924 								&word);
2925 			if (ret_val)
2926 				break;
2927 			data[i] = word;
2928 		}
2929 	}
2930 
2931 	nvm->ops.release(hw);
2932 
2933 out:
2934 	if (ret_val)
2935 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2936 
2937 	return ret_val;
2938 }
2939 
2940 /**
2941  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2942  *  @hw: pointer to the HW structure
2943  *
2944  *  This function does initial flash setup so that a new read/write/erase cycle
2945  *  can be started.
2946  **/
2947 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2948 {
2949 	union ich8_hws_flash_status hsfsts;
2950 	s32 ret_val = -E1000_ERR_NVM;
2951 
2952 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2953 
2954 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2955 
2956 	/* Check if the flash descriptor is valid */
2957 	if (!hsfsts.hsf_status.fldesvalid) {
2958 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
2959 		return -E1000_ERR_NVM;
2960 	}
2961 
2962 	/* Clear FCERR and DAEL in hw status by writing 1 */
2963 	hsfsts.hsf_status.flcerr = 1;
2964 	hsfsts.hsf_status.dael = 1;
2965 
2966 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2967 
2968 	/* Either we should have a hardware SPI cycle-in-progress
2969 	 * bit to check against in order to start a new cycle, or
2970 	 * the FDONE bit should be changed in the hardware so that
2971 	 * it is 1 after a hardware reset, which can then be used
2972 	 * as an indication of whether a cycle is in progress or
2973 	 * has been completed.
2974 	 */
2975 
2976 	if (!hsfsts.hsf_status.flcinprog) {
2977 		/* There is no cycle running at present,
2978 		 * so we can start a cycle.
2979 		 * Begin by setting Flash Cycle Done.
2980 		 */
2981 		hsfsts.hsf_status.flcdone = 1;
2982 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2983 		ret_val = E1000_SUCCESS;
2984 	} else {
2985 		s32 i;
2986 
2987 		/* Otherwise poll for some time so the current
2988 		 * cycle has a chance to end before giving up.
2989 		 */
2990 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2991 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2992 							      ICH_FLASH_HSFSTS);
2993 			if (!hsfsts.hsf_status.flcinprog) {
2994 				ret_val = E1000_SUCCESS;
2995 				break;
2996 			}
2997 			usec_delay(1);
2998 		}
2999 		if (ret_val == E1000_SUCCESS) {
3000 			/* We successfully waited for the previous cycle
3001 			 * to finish; now set the Flash Cycle Done.
3002 			 */
3003 			hsfsts.hsf_status.flcdone = 1;
3004 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3005 						hsfsts.regval);
3006 		} else {
3007 			DEBUGOUT("Flash controller busy, cannot get access\n");
3008 		}
3009 	}
3010 
3011 	return ret_val;
3012 }
3013 
3014 /**
3015  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3016  *  @hw: pointer to the HW structure
3017  *  @timeout: maximum time to wait for completion
3018  *
3019  *  This function starts a flash cycle and waits for its completion.
3020  **/
3021 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3022 {
3023 	union ich8_hws_flash_ctrl hsflctl;
3024 	union ich8_hws_flash_status hsfsts;
3025 	u32 i = 0;
3026 
3027 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3028 
3029 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3030 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3031 	hsflctl.hsf_ctrl.flcgo = 1;
3032 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3033 
3034 	/* wait till FDONE bit is set to 1 */
3035 	do {
3036 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3037 		if (hsfsts.hsf_status.flcdone)
3038 			break;
3039 		usec_delay(1);
3040 	} while (i++ < timeout);
3041 
3042 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3043 		return E1000_SUCCESS;
3044 
3045 	return -E1000_ERR_NVM;
3046 }
3047 
3048 /**
3049  *  e1000_read_flash_word_ich8lan - Read word from flash
3050  *  @hw: pointer to the HW structure
3051  *  @offset: offset to data location
3052  *  @data: pointer to the location for storing the data
3053  *
3054  *  Reads the flash word at offset into data.  Offset is converted
3055  *  to bytes before read.
3056  **/
3057 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3058 					 u16 *data)
3059 {
3060 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3061 
3062 	if (!data)
3063 		return -E1000_ERR_NVM;
3064 
3065 	/* Must convert offset into bytes. */
3066 	offset <<= 1;
3067 
3068 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3069 }
3070 
3071 /**
3072  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3073  *  @hw: pointer to the HW structure
3074  *  @offset: The offset of the byte to read.
3075  *  @data: Pointer to a byte to store the value read.
3076  *
3077  *  Reads a single byte from the NVM using the flash access registers.
3078  **/
3079 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3080 					 u8 *data)
3081 {
3082 	s32 ret_val;
3083 	u16 word = 0;
3084 
3085 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3086 	if (ret_val)
3087 		return ret_val;
3088 
3089 	*data = (u8)word;
3090 
3091 	return E1000_SUCCESS;
3092 }
3093 
3094 /**
3095  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3096  *  @hw: pointer to the HW structure
3097  *  @offset: The offset (in bytes) of the byte or word to read.
3098  *  @size: Size of data to read, 1=byte 2=word
3099  *  @data: Pointer to the word to store the value read.
3100  *
3101  *  Reads a byte or word from the NVM using the flash access registers.
3102  **/
3103 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3104 					 u8 size, u16 *data)
3105 {
3106 	union ich8_hws_flash_status hsfsts;
3107 	union ich8_hws_flash_ctrl hsflctl;
3108 	u32 flash_linear_addr;
3109 	u32 flash_data = 0;
3110 	s32 ret_val = -E1000_ERR_NVM;
3111 	u8 count = 0;
3112 
3113 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3114 
3115 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3116 		return -E1000_ERR_NVM;
3117 
3118 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3119 			    hw->nvm.flash_base_addr;
3120 
3121 	do {
3122 		usec_delay(1);
3123 		/* Steps */
3124 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3125 		if (ret_val != E1000_SUCCESS)
3126 			break;
3127 
3128 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3129 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3130 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3131 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3132 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3133 
3134 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3135 
3136 		ret_val = e1000_flash_cycle_ich8lan(hw,
3137 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3138 
3139 		/* If FCERR is set to 1, clear it and try the whole
3140 		 * sequence a few more times; otherwise read in
3141 		 * (shift in) the Flash Data0, the order being least
3142 		 * significant byte first.
3143 		 */
3144 		if (ret_val == E1000_SUCCESS) {
3145 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3146 			if (size == 1)
3147 				*data = (u8)(flash_data & 0x000000FF);
3148 			else if (size == 2)
3149 				*data = (u16)(flash_data & 0x0000FFFF);
3150 			break;
3151 		} else {
3152 			/* If we've gotten here, then things are probably
3153 			 * completely hosed, but if the error condition is
3154 			 * detected, it won't hurt to give it another try...
3155 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3156 			 */
3157 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3158 							      ICH_FLASH_HSFSTS);
3159 			if (hsfsts.hsf_status.flcerr) {
3160 				/* Repeat for some time before giving up. */
3161 				continue;
3162 			} else if (!hsfsts.hsf_status.flcdone) {
3163 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3164 				break;
3165 			}
3166 		}
3167 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3168 
3169 	return ret_val;
3170 }
3171 
3172 /**
3173  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3174  *  @hw: pointer to the HW structure
3175  *  @offset: The offset (in words) of the word(s) to write.
3176  *  @words: Size of data to write in words
3177  *  @data: Pointer to the word(s) to write at offset.
3178  *
3179  *  Writes the word(s) to the shadow RAM, to be committed to the NVM later.
3180  **/
3181 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3182 				   u16 *data)
3183 {
3184 	struct e1000_nvm_info *nvm = &hw->nvm;
3185 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3186 	u16 i;
3187 
3188 	DEBUGFUNC("e1000_write_nvm_ich8lan");
3189 
3190 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3191 	    (words == 0)) {
3192 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3193 		return -E1000_ERR_NVM;
3194 	}
3195 
3196 	nvm->ops.acquire(hw);
3197 
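	/* Only the shadow RAM is updated here; the words are committed
	 * to the flash by e1000_update_nvm_checksum_ich8lan().
	 */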
3198 	for (i = 0; i < words; i++) {
3199 		dev_spec->shadow_ram[offset+i].modified = TRUE;
3200 		dev_spec->shadow_ram[offset+i].value = data[i];
3201 	}
3202 
3203 	nvm->ops.release(hw);
3204 
3205 	return E1000_SUCCESS;
3206 }
3207 
3208 /**
3209  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3210  *  @hw: pointer to the HW structure
3211  *
3212  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3213  *  which writes the checksum to the shadow ram.  The changes in the shadow
3214  *  ram are then committed to the EEPROM by processing each bank at a time
3215  *  checking for the modified bit and writing only the pending changes.
3216  *  After a successful commit, the shadow ram is cleared and is ready for
3217  *  future writes.
3218  **/
3219 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3220 {
3221 	struct e1000_nvm_info *nvm = &hw->nvm;
3222 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3223 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3224 	s32 ret_val;
3225 	u16 data;
3226 
3227 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3228 
3229 	ret_val = e1000_update_nvm_checksum_generic(hw);
3230 	if (ret_val)
3231 		goto out;
3232 
3233 	if (nvm->type != e1000_nvm_flash_sw)
3234 		goto out;
3235 
3236 	nvm->ops.acquire(hw);
3237 
3238 	/* We're writing to the opposite bank so if we're on bank 1,
3239 	 * write to bank 0 etc.  We also need to erase the segment that
3240 	 * is going to be written
3241 	 */
3242 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3243 	if (ret_val != E1000_SUCCESS) {
3244 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3245 		bank = 0;
3246 	}
3247 
3248 	if (bank == 0) {
3249 		new_bank_offset = nvm->flash_bank_size;
3250 		old_bank_offset = 0;
3251 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3252 		if (ret_val)
3253 			goto release;
3254 	} else {
3255 		old_bank_offset = nvm->flash_bank_size;
3256 		new_bank_offset = 0;
3257 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3258 		if (ret_val)
3259 			goto release;
3260 	}
3261 
3262 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3263 		/* Determine whether to write the value stored
3264 		 * in the other NVM bank or a modified value stored
3265 		 * in the shadow RAM
3266 		 */
3267 		if (dev_spec->shadow_ram[i].modified) {
3268 			data = dev_spec->shadow_ram[i].value;
3269 		} else {
3270 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
3271 								old_bank_offset,
3272 								&data);
3273 			if (ret_val)
3274 				break;
3275 		}
3276 
3277 		/* If the word is 0x13, then make sure the signature bits
3278 		 * (15:14) are 11b until the commit has completed.
3279 		 * This will allow us to write 10b which indicates the
3280 		 * signature is valid.  We want to do this after the write
3281 		 * has completed so that we don't mark the segment valid
3282 		 * while the write is still in progress
3283 		 */
3284 		if (i == E1000_ICH_NVM_SIG_WORD)
3285 			data |= E1000_ICH_NVM_SIG_MASK;
3286 
3287 		/* Convert offset to bytes. */
3288 		act_offset = (i + new_bank_offset) << 1;
3289 
3290 		usec_delay(100);
3291 		/* Write the bytes to the new bank. */
3292 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3293 							       act_offset,
3294 							       (u8)data);
3295 		if (ret_val)
3296 			break;
3297 
3298 		usec_delay(100);
3299 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3300 							  act_offset + 1,
3301 							  (u8)(data >> 8));
3302 		if (ret_val)
3303 			break;
3304 	}
3305 
3306 	/* Don't bother writing the segment valid bits if sector
3307 	 * programming failed.
3308 	 */
3309 	if (ret_val) {
3310 		DEBUGOUT("Flash commit failed.\n");
3311 		goto release;
3312 	}
3313 
3314 	/* Finally, validate the new segment by setting bits 15:14
3315 	 * to 10b in word 0x13.  This can be done without an
3316 	 * erase as well, since these bits are 11b to start with
3317 	 * and we only need to change bit 14 to 0b.
3318 	 */
3319 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3320 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3321 	if (ret_val)
3322 		goto release;
3323 
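	/* Clearing bit 14 turns the signature bits 15:14 from 11b into
	 * the valid 10b pattern.
	 */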
3324 	data &= 0xBFFF;
3325 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3326 						       act_offset * 2 + 1,
3327 						       (u8)(data >> 8));
3328 	if (ret_val)
3329 		goto release;
3330 
3331 	/* And invalidate the previously valid segment by setting
3332 	 * its signature word (0x13) high byte to 0.  This can be
3333 	 * done without an erase because flash erase sets all bits
3334 	 * to 1's; we can write 1's to 0's without an erase.
3335 	 */
3336 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3337 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3338 	if (ret_val)
3339 		goto release;
3340 
3341 	/* Great!  Everything worked, we can now clear the cached entries. */
3342 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3343 		dev_spec->shadow_ram[i].modified = FALSE;
3344 		dev_spec->shadow_ram[i].value = 0xFFFF;
3345 	}
3346 
3347 release:
3348 	nvm->ops.release(hw);
3349 
3350 	/* Reload the EEPROM, or else modifications will not appear
3351 	 * until after the next adapter reset.
3352 	 */
3353 	if (!ret_val) {
3354 		nvm->ops.reload(hw);
3355 		msec_delay(10);
3356 	}
3357 
3358 out:
3359 	if (ret_val)
3360 		DEBUGOUT1("NVM update error: %d\n", ret_val);
3361 
3362 	return ret_val;
3363 }
3364 
3365 /**
3366  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3367  *  @hw: pointer to the HW structure
3368  *
3369  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
3370  *  If the bit is 0, the EEPROM had been modified, but the checksum was not
3371  *  calculated, in which case we need to calculate the checksum and set bit 6.
3372  **/
3373 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3374 {
3375 	s32 ret_val;
3376 	u16 data;
3377 	u16 word;
3378 	u16 valid_csum_mask;
3379 
3380 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3381 
3382 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3383 	 * the checksum needs to be fixed.  This bit is an indication that
3384 	 * the NVM was prepared by OEM software and did not calculate
3385 	 * the checksum...a likely scenario.
3386 	 */
3387 	switch (hw->mac.type) {
3388 	case e1000_pch_lpt:
3389 		word = NVM_COMPAT;
3390 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3391 		break;
3392 	default:
3393 		word = NVM_FUTURE_INIT_WORD1;
3394 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3395 		break;
3396 	}
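	/* On LPT the valid-checksum indication lives in the compatibility
	 * word rather than in word 0x19, so the word/mask pair selected
	 * above differs by MAC family.
	 */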
3397 
3398 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3399 	if (ret_val)
3400 		return ret_val;
3401 
3402 	if (!(data & valid_csum_mask)) {
3403 		data |= valid_csum_mask;
3404 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3405 		if (ret_val)
3406 			return ret_val;
3407 		ret_val = hw->nvm.ops.update(hw);
3408 		if (ret_val)
3409 			return ret_val;
3410 	}
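	/* With the valid-checksum bit now guaranteed to be set, fall
	 * through to the generic routine, which sums the NVM words up to
	 * and including the checksum word and verifies the total equals
	 * NVM_SUM (0xBABA).
	 */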
3411 
3412 	return e1000_validate_nvm_checksum_generic(hw);
3413 }
3414 
3415 /**
3416  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3417  *  @hw: pointer to the HW structure
3418  *  @offset: The offset (in bytes) of the byte/word to write.
3419  *  @size: Size of data to write, 1=byte 2=word
3420  *  @data: The byte(s) to write to the NVM.
3421  *
3422  *  Writes one/two bytes to the NVM using the flash access registers.
3423  **/
3424 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3425 					  u8 size, u16 data)
3426 {
3427 	union ich8_hws_flash_status hsfsts;
3428 	union ich8_hws_flash_ctrl hsflctl;
3429 	u32 flash_linear_addr;
3430 	u32 flash_data = 0;
3431 	s32 ret_val;
3432 	u8 count = 0;
3433 
3434 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
3435 
3436 	if (size < 1 || size > 2 || data > size * 0xff ||
3437 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
3438 		return -E1000_ERR_NVM;
3439 
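	/* The flash cycle hardware takes an address that is linear within
	 * the flash part, so mask the caller's offset into range and
	 * rebase it at the start of the NVM region.
	 */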
3440 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3441 			    hw->nvm.flash_base_addr;
3442 
3443 	do {
3444 		usec_delay(1);
3445 		/* Steps */
3446 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3447 		if (ret_val != E1000_SUCCESS)
3448 			break;
3449 
3450 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3451 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3452 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3453 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3454 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3455 
3456 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3457 
3458 		if (size == 1)
3459 			flash_data = (u32)data & 0x00FF;
3460 		else
3461 			flash_data = (u32)data;
3462 
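		/* FDATA0 is a 32-bit data register; only the low byte or
		 * word selected by fldbcount above is actually programmed.
		 */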
3463 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3464 
3465 		/* Check if FCERR is set to 1; if so, clear it and try
3466 		 * the whole sequence a few more times, else we are done.
3467 		 */
3468 		ret_val = e1000_flash_cycle_ich8lan(hw,
3469 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3470 		if (ret_val == E1000_SUCCESS)
3471 			break;
3472 
3473 		/* If we're here, then things are most likely
3474 		 * completely hosed, but if the error condition
3475 		 * is detected, it won't hurt to give it another
3476 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3477 		 */
3478 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3479 		if (hsfsts.hsf_status.flcerr)
3480 			/* Repeat for some time before giving up. */
3481 			continue;
3482 		if (!hsfsts.hsf_status.flcdone) {
3483 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3484 			break;
3485 		}
3486 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3487 
3488 	return ret_val;
3489 }
3490 
3491 /**
3492  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3493  *  @hw: pointer to the HW structure
3494  *  @offset: The index of the byte to write.
3495  *  @data: The byte to write to the NVM.
3496  *
3497  *  Writes a single byte to the NVM using the flash access registers.
3498  **/
3499 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3500 					  u8 data)
3501 {
3502 	u16 word = (u16)data;
3503 
3504 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3505 
3506 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3507 }
3508 
3509 /**
3510  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3511  *  @hw: pointer to the HW structure
3512  *  @offset: The offset of the byte to write.
3513  *  @byte: The byte to write to the NVM.
3514  *
3515  *  Writes a single byte to the NVM using the flash access registers.
3516  *  Goes through a retry algorithm before giving up.
3517  **/
3518 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3519 						u32 offset, u8 byte)
3520 {
3521 	s32 ret_val;
3522 	u16 program_retries;
3523 
3524 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3525 
3526 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3527 	if (!ret_val)
3528 		return ret_val;
3529 
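	/* The first attempt failed, so back off briefly and retry the
	 * byte write up to 100 times before declaring the NVM write a
	 * failure.
	 */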
3530 	for (program_retries = 0; program_retries < 100; program_retries++) {
3531 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3532 		usec_delay(100);
3533 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3534 		if (ret_val == E1000_SUCCESS)
3535 			break;
3536 	}
3537 	if (program_retries == 100)
3538 		return -E1000_ERR_NVM;
3539 
3540 	return E1000_SUCCESS;
3541 }
3542 
3543 /**
3544  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3545  *  @hw: pointer to the HW structure
3546  *  @bank: 0 for first bank, 1 for second bank, etc.
3547  *
3548  *  Erases the specified bank.  Each bank is a 4k block and banks are
3549  *  0 based; bank N starts at byte offset 4096 * N + flash_reg_addr.
3550  **/
3551 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3552 {
3553 	struct e1000_nvm_info *nvm = &hw->nvm;
3554 	union ich8_hws_flash_status hsfsts;
3555 	union ich8_hws_flash_ctrl hsflctl;
3556 	u32 flash_linear_addr;
3557 	/* bank size is in 16-bit words - adjust to bytes */
3558 	u32 flash_bank_size = nvm->flash_bank_size * 2;
3559 	s32 ret_val;
3560 	s32 count = 0;
3561 	s32 j, iteration, sector_size;
3562 
3563 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3564 
3565 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3566 
3567 	/* Determine HW Sector size: Read BERASE bits of hw flash status
3568 	 * register
3569 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3570 	 *     consecutive sectors.  The start index for the nth Hw sector
3571 	 *     can be calculated as bank * 4096 + n * 256
3572 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3573 	 *     The start index for the nth Hw sector can be calculated
3574 	 *     as bank * 4096
3575 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3576 	 *     (ich9 only, otherwise error condition)
3577 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3578 	 */
3579 	switch (hsfsts.hsf_status.berasesz) {
3580 	case 0:
3581 		/* Hw sector size 256 */
3582 		sector_size = ICH_FLASH_SEG_SIZE_256;
3583 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3584 		break;
3585 	case 1:
3586 		sector_size = ICH_FLASH_SEG_SIZE_4K;
3587 		iteration = 1;
3588 		break;
3589 	case 2:
3590 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3591 		iteration = 1;
3592 		break;
3593 	case 3:
3594 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3595 		iteration = 1;
3596 		break;
3597 	default:
3598 		return -E1000_ERR_NVM;
3599 	}
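	/* A worked example, assuming a 4 KB bank with berasesz = 00b:
	 * sector_size is 256 and iteration is 4096 / 256 = 16, i.e.
	 * sixteen consecutive 256-byte erase cycles cover the bank.
	 */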
3600 
3601 	/* Start with the base address, then add the sector offset. */
3602 	flash_linear_addr = hw->nvm.flash_base_addr;
3603 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3604 
3605 	for (j = 0; j < iteration ; j++) {
3606 		do {
3607 			/* Steps */
3608 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3609 			if (ret_val)
3610 				return ret_val;
3611 
3612 			/* Write the value 11b (block erase) into the Flash
3613 			 * Cycle field in hw flash control
3614 			 */
3615 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3616 							      ICH_FLASH_HSFCTL);
3617 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3618 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3619 						hsflctl.regval);
3620 
3621 			/* Write the last 24 bits of an index within the
3622 			 * block into Flash Linear address field in Flash
3623 			 * Address.
3624 			 */
3625 			flash_linear_addr += (j * sector_size);
3626 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3627 					      flash_linear_addr);
3628 
3629 			ret_val = e1000_flash_cycle_ich8lan(hw,
3630 					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3631 			if (ret_val == E1000_SUCCESS)
3632 				break;
3633 
3634 			/* Check if FCERR is set to 1.  If so,
3635 			 * clear it and try the whole sequence
3636 			 * a few more times, else we are done.
3637 			 */
3638 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3639 						      ICH_FLASH_HSFSTS);
3640 			if (hsfsts.hsf_status.flcerr)
3641 				/* repeat for some time before giving up */
3642 				continue;
3643 			else if (!hsfsts.hsf_status.flcdone)
3644 				return ret_val;
3645 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3646 	}
3647 
3648 	return E1000_SUCCESS;
3649 }
3650 
3651 /**
3652  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3653  *  @hw: pointer to the HW structure
3654  *  @data: Pointer to the LED settings
3655  *
3656  *  Reads the LED default settings from the NVM into data.  If the NVM LED
3657  *  setting is all 0's or F's, set the LED default to a valid LED default
3658  *  setting.
3659  **/
3660 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3661 {
3662 	s32 ret_val;
3663 
3664 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3665 
3666 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3667 	if (ret_val) {
3668 		DEBUGOUT("NVM Read Error\n");
3669 		return ret_val;
3670 	}
3671 
3672 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3673 		*data = ID_LED_DEFAULT_ICH8LAN;
3674 
3675 	return E1000_SUCCESS;
3676 }
3677 
3678 /**
3679  *  e1000_id_led_init_pchlan - store LED configurations
3680  *  @hw: pointer to the HW structure
3681  *
3682  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3683  *  the PHY LED configuration register.
3684  *
3685  *  PCH also does not have an "always on" or "always off" mode, which
3686  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3687  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3688  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3689  *  link based on logic in e1000_led_[on|off]_pchlan().
3690  **/
3691 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3692 {
3693 	struct e1000_mac_info *mac = &hw->mac;
3694 	s32 ret_val;
3695 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3696 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3697 	u16 data, i, temp, shift;
3698 
3699 	DEBUGFUNC("e1000_id_led_init_pchlan");
3700 
3701 	/* Get default ID LED modes */
3702 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3703 	if (ret_val)
3704 		return ret_val;
3705 
3706 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3707 	mac->ledctl_mode1 = mac->ledctl_default;
3708 	mac->ledctl_mode2 = mac->ledctl_default;
3709 
3710 	for (i = 0; i < 4; i++) {
3711 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3712 		shift = (i * 5);
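		/* The NVM packs each LED's default mode into a 4-bit
		 * nibble (hence i << 2), while the PHY LED configuration
		 * register spaces its per-LED fields 5 bits apart (hence
		 * i * 5).
		 */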
3713 		switch (temp) {
3714 		case ID_LED_ON1_DEF2:
3715 		case ID_LED_ON1_ON2:
3716 		case ID_LED_ON1_OFF2:
3717 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3718 			mac->ledctl_mode1 |= (ledctl_on << shift);
3719 			break;
3720 		case ID_LED_OFF1_DEF2:
3721 		case ID_LED_OFF1_ON2:
3722 		case ID_LED_OFF1_OFF2:
3723 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3724 			mac->ledctl_mode1 |= (ledctl_off << shift);
3725 			break;
3726 		default:
3727 			/* Do nothing */
3728 			break;
3729 		}
3730 		switch (temp) {
3731 		case ID_LED_DEF1_ON2:
3732 		case ID_LED_ON1_ON2:
3733 		case ID_LED_OFF1_ON2:
3734 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3735 			mac->ledctl_mode2 |= (ledctl_on << shift);
3736 			break;
3737 		case ID_LED_DEF1_OFF2:
3738 		case ID_LED_ON1_OFF2:
3739 		case ID_LED_OFF1_OFF2:
3740 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3741 			mac->ledctl_mode2 |= (ledctl_off << shift);
3742 			break;
3743 		default:
3744 			/* Do nothing */
3745 			break;
3746 		}
3747 	}
3748 
3749 	return E1000_SUCCESS;
3750 }
3751 
3752 /**
3753  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3754  *  @hw: pointer to the HW structure
3755  *
3756  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express
3757  *  Capability register, so the bus width is hard coded.
3758  **/
3759 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3760 {
3761 	struct e1000_bus_info *bus = &hw->bus;
3762 	s32 ret_val;
3763 
3764 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3765 
3766 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3767 
3768 	/* ICH devices are "PCI Express"-ish.  They have
3769 	 * a configuration space, but do not contain
3770 	 * PCI Express Capability registers, so bus width
3771 	 * must be hardcoded.
3772 	 */
3773 	if (bus->width == e1000_bus_width_unknown)
3774 		bus->width = e1000_bus_width_pcie_x1;
3775 
3776 	return ret_val;
3777 }
3778 
3779 /**
3780  *  e1000_reset_hw_ich8lan - Reset the hardware
3781  *  @hw: pointer to the HW structure
3782  *
3783  *  Does a full reset of the hardware which includes a reset of the PHY and
3784  *  MAC.
3785  **/
3786 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3787 {
3788 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3789 	u16 kum_cfg;
3790 	u32 ctrl, reg;
3791 	s32 ret_val;
3792 
3793 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3794 
3795 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
3796 	 * on the last TLP read/write transaction when MAC is reset.
3797 	 */
3798 	ret_val = e1000_disable_pcie_master_generic(hw);
3799 	if (ret_val)
3800 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3801 
3802 	DEBUGOUT("Masking off all interrupts\n");
3803 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3804 
3805 	/* Disable the Transmit and Receive units.  Then delay to allow
3806 	 * any pending transactions to complete before we hit the MAC
3807 	 * with the global reset.
3808 	 */
3809 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3810 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3811 	E1000_WRITE_FLUSH(hw);
3812 
3813 	msec_delay(10);
3814 
3815 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3816 	if (hw->mac.type == e1000_ich8lan) {
3817 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3818 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3819 		/* Set Packet Buffer Size to 16k. */
3820 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3821 	}
3822 
3823 	if (hw->mac.type == e1000_pchlan) {
3824 		/* Save the NVM K1 bit setting */
3825 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3826 		if (ret_val)
3827 			return ret_val;
3828 
3829 		if (kum_cfg & E1000_NVM_K1_ENABLE)
3830 			dev_spec->nvm_k1_enabled = TRUE;
3831 		else
3832 			dev_spec->nvm_k1_enabled = FALSE;
3833 	}
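	/* The cached nvm_k1_enabled flag is consumed by the K1
	 * configuration workarounds that run after the reset completes.
	 */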
3834 
3835 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3836 
3837 	if (!hw->phy.ops.check_reset_block(hw)) {
3838 		/* Full-chip reset requires MAC and PHY reset at the same
3839 		 * time to make sure the interface between MAC and the
3840 		 * external PHY is reset.
3841 		 */
3842 		ctrl |= E1000_CTRL_PHY_RST;
3843 
3844 		/* Gate automatic PHY configuration by hardware on
3845 		 * non-managed 82579
3846 		 */
3847 		if ((hw->mac.type == e1000_pch2lan) &&
3848 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3849 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3850 	}
3851 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3852 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3853 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3854 	/* cannot issue a flush here because it hangs the hardware */
3855 	msec_delay(20);
3856 
3857 	/* Set Phy Config Counter to 50msec */
3858 	if (hw->mac.type == e1000_pch2lan) {
3859 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3860 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3861 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3862 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
3863 	}
3864 
3865 	if (!ret_val)
3866 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
3867 
3868 	if (ctrl & E1000_CTRL_PHY_RST) {
3869 		ret_val = hw->phy.ops.get_cfg_done(hw);
3870 		if (ret_val)
3871 			return ret_val;
3872 
3873 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3874 		if (ret_val)
3875 			return ret_val;
3876 	}
3877 
3878 	/* For PCH, this write will make sure that any noise
3879 	 * will be detected as a CRC error and be dropped rather than show up
3880 	 * as a bad packet to the DMA engine.
3881 	 */
3882 	if (hw->mac.type == e1000_pchlan)
3883 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3884 
3885 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3886 	E1000_READ_REG(hw, E1000_ICR);
3887 
3888 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
3889 	reg |= E1000_KABGTXD_BGSQLBIAS;
3890 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
3891 
3892 	return E1000_SUCCESS;
3893 }
3894 
3895 /**
3896  *  e1000_init_hw_ich8lan - Initialize the hardware
3897  *  @hw: pointer to the HW structure
3898  *
3899  *  Prepares the hardware for transmit and receive by doing the following:
3900  *   - initialize hardware bits
3901  *   - initialize LED identification
3902  *   - setup receive address registers
3903  *   - setup flow control
3904  *   - setup transmit descriptors
3905  *   - clear statistics
3906  **/
3907 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3908 {
3909 	struct e1000_mac_info *mac = &hw->mac;
3910 	u32 ctrl_ext, txdctl, snoop;
3911 	s32 ret_val;
3912 	u16 i;
3913 
3914 	DEBUGFUNC("e1000_init_hw_ich8lan");
3915 
3916 	e1000_initialize_hw_bits_ich8lan(hw);
3917 
3918 	/* Initialize identification LED */
3919 	ret_val = mac->ops.id_led_init(hw);
3920 	/* An error is not fatal and we should not stop init due to this */
3921 	if (ret_val)
3922 		DEBUGOUT("Error initializing identification LED\n");
3923 
3924 	/* Setup the receive address. */
3925 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3926 
3927 	/* Zero out the Multicast HASH table */
3928 	DEBUGOUT("Zeroing the MTA\n");
3929 	for (i = 0; i < mac->mta_reg_count; i++)
3930 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3931 
3932 	/* The 82578 Rx buffer will stall if wakeup is enabled in both the
3933 	 * host and the ME.  Disable wakeup by clearing the host wakeup bit.
3934 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3935 	 */
3936 	if (hw->phy.type == e1000_phy_82578) {
3937 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3938 		i &= ~BM_WUC_HOST_WU_BIT;
3939 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3940 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3941 		if (ret_val)
3942 			return ret_val;
3943 	}
3944 
3945 	/* Setup link and flow control */
3946 	ret_val = mac->ops.setup_link(hw);
3947 
3948 	/* Set the transmit descriptor write-back policy for both queues */
3949 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3950 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3951 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3952 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3953 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3954 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3955 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3956 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3957 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3958 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3959 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3960 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3961 
3962 	/* ICH8 has opposite polarity of no_snoop bits.
3963 	 * By default, we should use snoop behavior.
3964 	 */
3965 	if (mac->type == e1000_ich8lan)
3966 		snoop = PCIE_ICH8_SNOOP_ALL;
3967 	else
3968 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3969 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3970 
3971 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3972 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3973 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3974 
3975 	/* Clear all of the statistics registers (clear on read).  It is
3976 	 * important that we do this after we have tried to establish link
3977 	 * because the symbol error count will increment wildly if there
3978 	 * is no link.
3979 	 */
3980 	e1000_clear_hw_cntrs_ich8lan(hw);
3981 
3982 	return ret_val;
3983 }
3984 
3985 /**
3986  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3987  *  @hw: pointer to the HW structure
3988  *
3989  *  Sets/Clears required hardware bits necessary for correctly setting up the
3990  *  hardware for transmit and receive.
3991  **/
3992 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3993 {
3994 	u32 reg;
3995 
3996 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3997 
3998 	/* Extended Device Control */
3999 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4000 	reg |= (1 << 22);
4001 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4002 	if (hw->mac.type >= e1000_pchlan)
4003 		reg |= E1000_CTRL_EXT_PHYPDEN;
4004 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4005 
4006 	/* Transmit Descriptor Control 0 */
4007 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4008 	reg |= (1 << 22);
4009 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4010 
4011 	/* Transmit Descriptor Control 1 */
4012 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4013 	reg |= (1 << 22);
4014 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4015 
4016 	/* Transmit Arbitration Control 0 */
4017 	reg = E1000_READ_REG(hw, E1000_TARC(0));
4018 	if (hw->mac.type == e1000_ich8lan)
4019 		reg |= (1 << 28) | (1 << 29);
4020 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4021 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4022 
4023 	/* Transmit Arbitration Control 1 */
4024 	reg = E1000_READ_REG(hw, E1000_TARC(1));
4025 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4026 		reg &= ~(1 << 28);
4027 	else
4028 		reg |= (1 << 28);
4029 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
4030 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4031 
4032 	/* Device Status */
4033 	if (hw->mac.type == e1000_ich8lan) {
4034 		reg = E1000_READ_REG(hw, E1000_STATUS);
4035 		reg &= ~(1UL << 31);
4036 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
4037 	}
4038 
4039 	/* Work around a descriptor data corruption issue during NFSv2 UDP
4040 	 * traffic by simply disabling the NFS filtering capability.
4041 	 */
4042 	reg = E1000_READ_REG(hw, E1000_RFCTL);
4043 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4044 	/* Disable IPv6 extension header parsing because some malformed
4045 	 * IPv6 headers can hang the Rx.
4046 	 */
4047 	if (hw->mac.type == e1000_ich8lan)
4048 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4049 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4050 
4051 	/* Enable ECC on Lynxpoint */
4052 	if (hw->mac.type == e1000_pch_lpt) {
4053 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4054 		reg |= E1000_PBECCSTS_ECC_ENABLE;
4055 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4056 
4057 		reg = E1000_READ_REG(hw, E1000_CTRL);
4058 		reg |= E1000_CTRL_MEHE;
4059 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
4060 	}
4061 
4062 	return;
4063 }
4064 
4065 /**
4066  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4067  *  @hw: pointer to the HW structure
4068  *
4069  *  Determines which flow control settings to use, then configures flow
4070  *  control.  Calls the appropriate media-specific link configuration
4071  *  function.  Assuming the adapter has a valid link partner, a valid link
4072  *  should be established.  Assumes the hardware has previously been reset
4073  *  and the transmitter and receiver are not enabled.
4074  **/
4075 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4076 {
4077 	s32 ret_val;
4078 
4079 	DEBUGFUNC("e1000_setup_link_ich8lan");
4080 
4081 	if (hw->phy.ops.check_reset_block(hw))
4082 		return E1000_SUCCESS;
4083 
4084 	/* ICH parts do not have a word in the NVM to determine
4085 	 * the default flow control setting, so we explicitly
4086 	 * set it to full.
4087 	 */
4088 	if (hw->fc.requested_mode == e1000_fc_default)
4089 		hw->fc.requested_mode = e1000_fc_full;
4090 
4091 	/* Save off the requested flow control mode for use later.  Depending
4092 	 * on the link partner's capabilities, we may or may not use this mode.
4093 	 */
4094 	hw->fc.current_mode = hw->fc.requested_mode;
4095 
4096 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4097 		hw->fc.current_mode);
4098 
4099 	/* Continue to configure the copper link. */
4100 	ret_val = hw->mac.ops.setup_physical_interface(hw);
4101 	if (ret_val)
4102 		return ret_val;
4103 
4104 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4105 	if ((hw->phy.type == e1000_phy_82578) ||
4106 	    (hw->phy.type == e1000_phy_82579) ||
4107 	    (hw->phy.type == e1000_phy_i217) ||
4108 	    (hw->phy.type == e1000_phy_82577)) {
4109 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4110 
4111 		ret_val = hw->phy.ops.write_reg(hw,
4112 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
4113 					     hw->fc.pause_time);
4114 		if (ret_val)
4115 			return ret_val;
4116 	}
4117 
4118 	return e1000_set_fc_watermarks_generic(hw);
4119 }
4120 
4121 /**
4122  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4123  *  @hw: pointer to the HW structure
4124  *
4125  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4126  *  when polling the PHY, then calls the generic setup_copper_link to finish
4127  *  configuring the copper link.
4128  **/
4129 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4130 {
4131 	u32 ctrl;
4132 	s32 ret_val;
4133 	u16 reg_data;
4134 
4135 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4136 
4137 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4138 	ctrl |= E1000_CTRL_SLU;
4139 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4140 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4141 
4142 	/* Set the mac to wait the maximum time between each iteration
4143 	 * and increase the max iterations when polling the phy;
4144 	 * this fixes erroneous timeouts at 10Mbps.
4145 	 */
4146 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4147 					       0xFFFF);
4148 	if (ret_val)
4149 		return ret_val;
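	/* The low six bits of the Kumeran in-band parameter are raised
	 * next; this presumably works together with the relaxed timeouts
	 * above to avoid the erroneous 10Mbps polling timeouts.
	 */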
4150 	ret_val = e1000_read_kmrn_reg_generic(hw,
4151 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
4152 					      &reg_data);
4153 	if (ret_val)
4154 		return ret_val;
4155 	reg_data |= 0x3F;
4156 	ret_val = e1000_write_kmrn_reg_generic(hw,
4157 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
4158 					       reg_data);
4159 	if (ret_val)
4160 		return ret_val;
4161 
4162 	switch (hw->phy.type) {
4163 	case e1000_phy_igp_3:
4164 		ret_val = e1000_copper_link_setup_igp(hw);
4165 		if (ret_val)
4166 			return ret_val;
4167 		break;
4168 	case e1000_phy_bm:
4169 	case e1000_phy_82578:
4170 		ret_val = e1000_copper_link_setup_m88(hw);
4171 		if (ret_val)
4172 			return ret_val;
4173 		break;
4174 	case e1000_phy_82577:
4175 	case e1000_phy_82579:
4176 		ret_val = e1000_copper_link_setup_82577(hw);
4177 		if (ret_val)
4178 			return ret_val;
4179 		break;
4180 	case e1000_phy_ife:
4181 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4182 					       &reg_data);
4183 		if (ret_val)
4184 			return ret_val;
4185 
4186 		reg_data &= ~IFE_PMC_AUTO_MDIX;
4187 
4188 		switch (hw->phy.mdix) {
4189 		case 1:
4190 			reg_data &= ~IFE_PMC_FORCE_MDIX;
4191 			break;
4192 		case 2:
4193 			reg_data |= IFE_PMC_FORCE_MDIX;
4194 			break;
4195 		case 0:
4196 		default:
4197 			reg_data |= IFE_PMC_AUTO_MDIX;
4198 			break;
4199 		}
4200 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4201 						reg_data);
4202 		if (ret_val)
4203 			return ret_val;
4204 		break;
4205 	default:
4206 		break;
4207 	}
4208 
4209 	return e1000_setup_copper_link_generic(hw);
4210 }
4211 
4212 /**
4213  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4214  *  @hw: pointer to the HW structure
4215  *
4216  *  Calls the PHY specific link setup function and then calls the
4217  *  generic setup_copper_link to finish configuring the link for
4218  *  Lynxpoint PCH devices
4219  **/
4220 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4221 {
4222 	u32 ctrl;
4223 	s32 ret_val;
4224 
4225 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4226 
4227 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4228 	ctrl |= E1000_CTRL_SLU;
4229 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4230 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4231 
4232 	ret_val = e1000_copper_link_setup_82577(hw);
4233 	if (ret_val)
4234 		return ret_val;
4235 
4236 	return e1000_setup_copper_link_generic(hw);
4237 }
4238 
4239 /**
4240  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4241  *  @hw: pointer to the HW structure
4242  *  @speed: pointer to store current link speed
4243  *  @duplex: pointer to store the current link duplex
4244  *
4245  *  Calls the generic get_speed_and_duplex to retrieve the current link
4246  *  information and then calls the Kumeran lock loss workaround for links at
4247  *  gigabit speeds.
4248  **/
4249 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4250 					  u16 *duplex)
4251 {
4252 	s32 ret_val;
4253 
4254 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4255 
4256 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4257 	if (ret_val)
4258 		return ret_val;
4259 
4260 	if ((hw->mac.type == e1000_ich8lan) &&
4261 	    (hw->phy.type == e1000_phy_igp_3) &&
4262 	    (*speed == SPEED_1000)) {
4263 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4264 	}
4265 
4266 	return ret_val;
4267 }
4268 
4269 /**
4270  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4271  *  @hw: pointer to the HW structure
4272  *
4273  *  Work-around for 82566 Kumeran PCS lock loss:
4274  *  On link status change (e.g. PCI reset, speed change) when link is up and
4275  *  speed is gigabit:
4276  *    0) if workaround is optionally disabled do nothing
4277  *    1) wait 1ms for Kumeran link to come up
4278  *    2) check Kumeran Diagnostic register PCS lock loss bit
4279  *    3) if not set the link is locked (all is good), otherwise...
4280  *    3) if not set, the link is locked (all is good), otherwise...
4281  *    5) repeat up to 10 times
4282  *  Note: this is only called for IGP3 copper when speed is 1gb.
4283  **/
4284 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4285 {
4286 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4287 	u32 phy_ctrl;
4288 	s32 ret_val;
4289 	u16 i, data;
4290 	bool link;
4291 
4292 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4293 
4294 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4295 		return E1000_SUCCESS;
4296 
4297 	/* Make sure link is up before proceeding; if not, just return.
4298 	 * Attempting this while the link is negotiating fouls up link
4299 	 * stability.
4300 	 */
4301 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4302 	if (!link)
4303 		return E1000_SUCCESS;
4304 
4305 	for (i = 0; i < 10; i++) {
4306 		/* read once to clear */
4307 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4308 		if (ret_val)
4309 			return ret_val;
4310 		/* and again to get new status */
4311 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4312 		if (ret_val)
4313 			return ret_val;
4314 
4315 		/* check for PCS lock */
4316 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4317 			return E1000_SUCCESS;
4318 
4319 		/* Issue PHY reset */
4320 		hw->phy.ops.reset(hw);
4321 		msec_delay_irq(5);
4322 	}
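	/* Ten PHY resets did not restore PCS lock, so fall back to
	 * disabling gigabit negotiation entirely; a stable 10/100 link
	 * is preferable to an unusable gigabit one.
	 */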
4323 	/* Disable GigE link negotiation */
4324 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4325 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4326 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4327 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4328 
4329 	/* Call gig speed drop workaround on Gig disable before accessing
4330 	 * any PHY registers
4331 	 */
4332 	e1000_gig_downshift_workaround_ich8lan(hw);
4333 
4334 	/* unable to acquire PCS lock */
4335 	return -E1000_ERR_PHY;
4336 }
4337 
4338 /**
4339  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4340  *  @hw: pointer to the HW structure
4341  *  @state: boolean value used to set the current Kumeran workaround state
4342  *
4343  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE,
4344  *  disabled - FALSE).
4345  **/
4346 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4347 						 bool state)
4348 {
4349 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4350 
4351 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4352 
4353 	if (hw->mac.type != e1000_ich8lan) {
4354 		DEBUGOUT("Workaround applies to ICH8 only.\n");
4355 		return;
4356 	}
4357 
4358 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
4359 
4360 	return;
4361 }
4362 
4363 /**
4364  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4365  *  @hw: pointer to the HW structure
4366  *
4367  *  Workaround for 82566 power-down on D3 entry:
4368  *    1) disable gigabit link
4369  *    2) write VR power-down enable
4370  *    3) read it back
4371  *  Continue if successful, else issue LCD reset and repeat
4372  **/
4373 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4374 {
4375 	u32 reg;
4376 	u16 data;
4377 	u8  retry = 0;
4378 
4379 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4380 
4381 	if (hw->phy.type != e1000_phy_igp_3)
4382 		return;
4383 
4384 	/* Try the workaround twice (if needed) */
4385 	do {
4386 		/* Disable link */
4387 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4388 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4389 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4390 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4391 
4392 		/* Call gig speed drop workaround on Gig disable before
4393 		 * accessing any PHY registers
4394 		 */
4395 		if (hw->mac.type == e1000_ich8lan)
4396 			e1000_gig_downshift_workaround_ich8lan(hw);
4397 
4398 		/* Write VR power-down enable */
4399 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4400 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4401 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4402 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4403 
4404 		/* Read it back and test */
4405 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4406 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4407 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4408 			break;
4409 
4410 		/* Issue PHY reset and repeat at most one more time */
4411 		reg = E1000_READ_REG(hw, E1000_CTRL);
4412 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4413 		retry++;
4414 	} while (retry);
4415 }
4416 
4417 /**
4418  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4419  *  @hw: pointer to the HW structure
4420  *
4421  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4422  *  LPLU, Gig disable, MDIC PHY reset):
4423  *    1) Set Kumeran Near-end loopback
4424  *    2) Clear Kumeran Near-end loopback
4425  *  Should only be called for ICH8[m] devices with any 1G Phy.
4426  **/
4427 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4428 {
4429 	s32 ret_val;
4430 	u16 reg_data;
4431 
4432 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4433 
4434 	if ((hw->mac.type != e1000_ich8lan) ||
4435 	    (hw->phy.type == e1000_phy_ife))
4436 		return;
4437 
4438 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4439 					      &reg_data);
4440 	if (ret_val)
4441 		return;
4442 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4443 	ret_val = e1000_write_kmrn_reg_generic(hw,
4444 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
4445 					       reg_data);
4446 	if (ret_val)
4447 		return;
4448 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4449 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4450 				     reg_data);
4451 }
4452 
4453 /**
4454  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4455  *  @hw: pointer to the HW structure
4456  *
4457  *  During S0 to Sx transition, it is possible the link remains at gig
4458  *  instead of negotiating to a lower speed.  Before going to Sx, set
4459  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4460  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4461  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4462  *  needs to be written.
4463  *  Parts that support (and are linked to a partner which supports) EEE in
4464  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4465  *  than 10Mbps w/o EEE.
4466  **/
4467 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4468 {
4469 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4470 	u32 phy_ctrl;
4471 	s32 ret_val;
4472 
4473 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4474 
4475 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4476 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4477 
4478 	if (hw->phy.type == e1000_phy_i217) {
4479 		u16 phy_reg, device_id = hw->device_id;
4480 
4481 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4482 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4483 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4484 
4485 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4486 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4487 		}
4488 
4489 		ret_val = hw->phy.ops.acquire(hw);
4490 		if (ret_val)
4491 			goto out;
4492 
4493 		if (!dev_spec->eee_disable) {
4494 			u16 eee_advert;
4495 
4496 			ret_val =
4497 			    e1000_read_emi_reg_locked(hw,
4498 						      I217_EEE_ADVERTISEMENT,
4499 						      &eee_advert);
4500 			if (ret_val)
4501 				goto release;
4502 
4503 			/* Disable LPLU if both link partners support 100BaseT
4504 			 * EEE and 100Full is advertised on both ends of the
4505 			 * link.
4506 			 */
4507 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4508 			    (dev_spec->eee_lp_ability &
4509 			     I82579_EEE_100_SUPPORTED) &&
4510 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4511 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4512 					      E1000_PHY_CTRL_NOND0A_LPLU);
4513 		}
4514 
4515 		/* For i217 Intel Rapid Start Technology support,
4516 		 * when the system is going into Sx and no manageability engine
4517 		 * is present, the driver must configure proxy to reset only on
4518 		 * power good.  LPI (Low Power Idle) state must also reset only
4519 		 * on power good, as well as the MTA (Multicast table array).
4520 		 * The SMBus release must also be disabled on LCD reset.
4521 		 */
4522 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4523 			E1000_ICH_FWSM_FW_VALID)) {
4524 			/* Enable proxy to reset only on power good. */
4525 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4526 						    &phy_reg);
4527 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4528 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4529 						     phy_reg);
4530 
4531 			/* Set the LPI (EEE) enable bit to reset only on
4532 			 * power good.
4533 			 */
4534 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4535 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4536 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4537 
4538 			/* Disable the SMB release on LCD reset. */
4539 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4540 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4541 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4542 		}
4543 
4544 		/* Enable MTA to reset for Intel Rapid Start Technology
4545 		 * Support
4546 		 */
4547 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4548 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4549 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4550 
4551 release:
4552 		hw->phy.ops.release(hw);
4553 	}
4554 out:
4555 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4556 
4557 	if (hw->mac.type == e1000_ich8lan)
4558 		e1000_gig_downshift_workaround_ich8lan(hw);
4559 
4560 	if (hw->mac.type >= e1000_pchlan) {
4561 		e1000_oem_bits_config_ich8lan(hw, FALSE);
4562 
4563 		/* Reset PHY to activate OEM bits on 82577/8 */
4564 		if (hw->mac.type == e1000_pchlan)
4565 			e1000_phy_hw_reset_generic(hw);
4566 
4567 		ret_val = hw->phy.ops.acquire(hw);
4568 		if (ret_val)
4569 			return;
4570 		e1000_write_smbus_addr(hw);
4571 		hw->phy.ops.release(hw);
4572 	}
4573 
4574 	return;
4575 }
4576 
4577 /**
4578  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4579  *  @hw: pointer to the HW structure
4580  *
4581  *  During Sx to S0 transitions on non-managed devices or managed devices
4582  *  on which PHY resets are not blocked, if the PHY registers cannot be
4583  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4584  *  the PHY.
4585  *  On i217, setup Intel Rapid Start Technology.
4586  **/
4587 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4588 {
4589 	s32 ret_val;
4590 
4591 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
4592 
4593 	if (hw->mac.type < e1000_pch2lan)
4594 		return;
4595 
4596 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
4597 	if (ret_val) {
4598 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4599 		return;
4600 	}
4601 
4602 	/* For i217 Intel Rapid Start Technology support, when the system
4603 	 * is transitioning from Sx and no manageability engine is present,
4604 	 * configure SMBus to restore on reset, disable proxy, and enable
4605 	 * the reset on MTA (Multicast table array).
4606 	 */
4607 	if (hw->phy.type == e1000_phy_i217) {
4608 		u16 phy_reg;
4609 
4610 		ret_val = hw->phy.ops.acquire(hw);
4611 		if (ret_val) {
4612 			DEBUGOUT("Failed to setup iRST\n");
4613 			return;
4614 		}
4615 
4616 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4617 		    E1000_ICH_FWSM_FW_VALID)) {
4618 			/* Restore clear on SMB if no manageability engine
4619 			 * is present
4620 			 */
4621 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4622 							      &phy_reg);
4623 			if (ret_val)
4624 				goto release;
4625 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4626 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4627 
4628 			/* Disable Proxy */
4629 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4630 		}
4631 		/* Enable reset on MTA */
4632 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4633 						      &phy_reg);
4634 		if (ret_val)
4635 			goto release;
4636 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4637 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4638 release:
4639 		if (ret_val)
4640 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4641 		hw->phy.ops.release(hw);
4642 	}
4643 }
4644 
4645 /**
4646  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4647  *  @hw: pointer to the HW structure
4648  *
4649  *  Return the LED back to the default configuration.
4650  **/
4651 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4652 {
4653 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
4654 
4655 	if (hw->phy.type == e1000_phy_ife)
4656 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4657 					     0);
4658 
4659 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4660 	return E1000_SUCCESS;
4661 }
4662 
4663 /**
4664  *  e1000_led_on_ich8lan - Turn LEDs on
4665  *  @hw: pointer to the HW structure
4666  *
4667  *  Turn on the LEDs.
4668  **/
4669 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4670 {
4671 	DEBUGFUNC("e1000_led_on_ich8lan");
4672 
4673 	if (hw->phy.type == e1000_phy_ife)
4674 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4675 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4676 
4677 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4678 	return E1000_SUCCESS;
4679 }
4680 
4681 /**
4682  *  e1000_led_off_ich8lan - Turn LEDs off
4683  *  @hw: pointer to the HW structure
4684  *
4685  *  Turn off the LEDs.
4686  **/
4687 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4688 {
4689 	DEBUGFUNC("e1000_led_off_ich8lan");
4690 
4691 	if (hw->phy.type == e1000_phy_ife)
4692 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4693 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4694 
4695 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4696 	return E1000_SUCCESS;
4697 }
4698 
4699 /**
4700  *  e1000_setup_led_pchlan - Configures SW controllable LED
4701  *  @hw: pointer to the HW structure
4702  *
4703  *  This prepares the SW controllable LED for use.
4704  **/
4705 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4706 {
4707 	DEBUGFUNC("e1000_setup_led_pchlan");
4708 
4709 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4710 				     (u16)hw->mac.ledctl_mode1);
4711 }
4712 
4713 /**
4714  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4715  *  @hw: pointer to the HW structure
4716  *
4717  *  Return the LED back to the default configuration.
4718  **/
4719 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4720 {
4721 	DEBUGFUNC("e1000_cleanup_led_pchlan");
4722 
4723 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4724 				     (u16)hw->mac.ledctl_default);
4725 }
4726 
4727 /**
4728  *  e1000_led_on_pchlan - Turn LEDs on
4729  *  @hw: pointer to the HW structure
4730  *
4731  *  Turn on the LEDs.
4732  **/
4733 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4734 {
4735 	u16 data = (u16)hw->mac.ledctl_mode2;
4736 	u32 i, led;
4737 
4738 	DEBUGFUNC("e1000_led_on_pchlan");
4739 
4740 	/* If no link, then turn LED on by setting the invert bit
4741 	 * for each LED whose mode is "link_up" in ledctl_mode2.
4742 	 */
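	/* Flipping the invert bit changes an LED's resting (no-link)
	 * state, so a dark "link_up" LED lights up and an already
	 * inverted one goes dark, which is what makes the ID request
	 * visible without link.
	 */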
4743 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4744 		for (i = 0; i < 3; i++) {
4745 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4746 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4747 			    E1000_LEDCTL_MODE_LINK_UP)
4748 				continue;
4749 			if (led & E1000_PHY_LED0_IVRT)
4750 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4751 			else
4752 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4753 		}
4754 	}
4755 
4756 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4757 }
4758 
4759 /**
4760  *  e1000_led_off_pchlan - Turn LEDs off
4761  *  @hw: pointer to the HW structure
4762  *
4763  *  Turn off the LEDs.
4764  **/
4765 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4766 {
4767 	u16 data = (u16)hw->mac.ledctl_mode1;
4768 	u32 i, led;
4769 
4770 	DEBUGFUNC("e1000_led_off_pchlan");
4771 
4772 	/* If no link, then turn LED off by clearing the invert bit
4773 	 * for each LED whose mode is "link_up" in ledctl_mode1.
4774 	 */
4775 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4776 		for (i = 0; i < 3; i++) {
4777 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4778 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4779 			    E1000_LEDCTL_MODE_LINK_UP)
4780 				continue;
4781 			if (led & E1000_PHY_LED0_IVRT)
4782 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4783 			else
4784 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4785 		}
4786 	}
4787 
4788 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4789 }
4790 
4791 /**
4792  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4793  *  @hw: pointer to the HW structure
4794  *
4795  *  Read appropriate register for the config done bit for completion status
4796  *  and configure the PHY through s/w for EEPROM-less parts.
4797  *
4798  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
4799  *  config done bit, so an error is only logged and initialization continues.
4800  *  If we were to return an error, EEPROM-less silicon would not be able to
4801  *  be reset or change link.
4802  **/
4803 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4804 {
4805 	s32 ret_val = E1000_SUCCESS;
4806 	u32 bank = 0;
4807 	u32 status;
4808 
4809 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4810 
4811 	e1000_get_cfg_done_generic(hw);
4812 
4813 	/* Wait for indication from h/w that it has completed basic config */
4814 	if (hw->mac.type >= e1000_ich10lan) {
4815 		e1000_lan_init_done_ich8lan(hw);
4816 	} else {
4817 		ret_val = e1000_get_auto_rd_done_generic(hw);
4818 		if (ret_val) {
4819 			/* When auto config read does not complete, do not
4820 			 * return with an error. This can happen in situations
4821 			 * where there is no eeprom, and returning an error
			 * would prevent getting link.
4822 			 */
4823 			DEBUGOUT("Auto Read Done did not complete\n");
4824 			ret_val = E1000_SUCCESS;
4825 		}
4826 	}
4827 
4828 	/* Clear PHY Reset Asserted bit */
4829 	status = E1000_READ_REG(hw, E1000_STATUS);
4830 	if (status & E1000_STATUS_PHYRA) {
4831 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4832 	} else {
4833 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4834 	}
4835 
4836 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4837 	if (hw->mac.type <= e1000_ich9lan) {
4838 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
4839 		    (hw->phy.type == e1000_phy_igp_3)) {
4840 			e1000_phy_init_script_igp3(hw);
4841 		}
4842 	} else {
4843 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4844 			/* Maybe we should do a basic PHY config */
4845 			DEBUGOUT("EEPROM not present\n");
4846 			ret_val = -E1000_ERR_CONFIG;
4847 		}
4848 	}
4849 
4850 	return ret_val;
4851 }
4852 
4853 /**
4854  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4855  * @hw: pointer to the HW structure
4856  *
4857  * In the case of a PHY power down to save power, to turn off link during a
4858  * driver unload, or when wake on LAN is not enabled, remove the link.
4859  **/
4860 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4861 {
4862 	/* If the management interface is not enabled, then power down */
4863 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4864 	      hw->phy.ops.check_reset_block(hw)))
4865 		e1000_power_down_phy_copper(hw);
4866 
4867 	return;
4868 }
4869 
4870 /**
4871  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4872  *  @hw: pointer to the HW structure
4873  *
4874  *  Clears hardware counters specific to the silicon family and calls
4875  *  clear_hw_cntrs_generic to clear all general purpose counters.
4876  **/
4877 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4878 {
4879 	u16 phy_data;
4880 	s32 ret_val;
4881 
4882 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4883 
4884 	e1000_clear_hw_cntrs_base_generic(hw);
4885 
4886 	E1000_READ_REG(hw, E1000_ALGNERRC);
4887 	E1000_READ_REG(hw, E1000_RXERRC);
4888 	E1000_READ_REG(hw, E1000_TNCRS);
4889 	E1000_READ_REG(hw, E1000_CEXTERR);
4890 	E1000_READ_REG(hw, E1000_TSCTC);
4891 	E1000_READ_REG(hw, E1000_TSCTFC);
4892 
4893 	E1000_READ_REG(hw, E1000_MGTPRC);
4894 	E1000_READ_REG(hw, E1000_MGTPDC);
4895 	E1000_READ_REG(hw, E1000_MGTPTC);
4896 
4897 	E1000_READ_REG(hw, E1000_IAC);
4898 	E1000_READ_REG(hw, E1000_ICRXOC);
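	/* The MAC statistics registers above are clear-on-read, so the
	 * discarded read values are sufficient to zero them.
	 */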
4899 
4900 	/* Clear PHY statistics registers */
4901 	if ((hw->phy.type == e1000_phy_82578) ||
4902 	    (hw->phy.type == e1000_phy_82579) ||
4903 	    (hw->phy.type == e1000_phy_i217) ||
4904 	    (hw->phy.type == e1000_phy_82577)) {
4905 		ret_val = hw->phy.ops.acquire(hw);
4906 		if (ret_val)
4907 			return;
4908 		ret_val = hw->phy.ops.set_page(hw,
4909 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
4910 		if (ret_val)
4911 			goto release;
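		/* The HV statistics page splits each counter into
		 * UPPER/LOWER halves; reading both halves of each pair is
		 * assumed sufficient to clear the counter.
		 */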
4912 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4913 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4914 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4915 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4916 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4917 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4918 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4919 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4920 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4921 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4922 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4923 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4924 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4925 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4926 release:
4927 		hw->phy.ops.release(hw);
4928 	}
4929 }
4930 
4931