xref: /freebsd/sys/dev/e1000/e1000_ich8lan.c (revision 9a14aa017b21c292740c00ee098195cd46642730)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2011, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 /*
36  * 82562G 10/100 Network Connection
37  * 82562G-2 10/100 Network Connection
38  * 82562GT 10/100 Network Connection
39  * 82562GT-2 10/100 Network Connection
40  * 82562V 10/100 Network Connection
41  * 82562V-2 10/100 Network Connection
42  * 82566DC-2 Gigabit Network Connection
43  * 82566DC Gigabit Network Connection
44  * 82566DM-2 Gigabit Network Connection
45  * 82566DM Gigabit Network Connection
46  * 82566MC Gigabit Network Connection
47  * 82566MM Gigabit Network Connection
48  * 82567LM Gigabit Network Connection
49  * 82567LF Gigabit Network Connection
50  * 82567V Gigabit Network Connection
51  * 82567LM-2 Gigabit Network Connection
52  * 82567LF-2 Gigabit Network Connection
53  * 82567V-2 Gigabit Network Connection
54  * 82567LF-3 Gigabit Network Connection
55  * 82567LM-3 Gigabit Network Connection
56  * 82567LM-4 Gigabit Network Connection
57  * 82577LM Gigabit Network Connection
58  * 82577LC Gigabit Network Connection
59  * 82578DM Gigabit Network Connection
60  * 82578DC Gigabit Network Connection
61  * 82579LM Gigabit Network Connection
62  * 82579V Gigabit Network Connection
63  */
64 
65 #include "e1000_api.h"
66 
67 static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
79 					      u8 *mc_addr_list,
80 					      u32 mc_addr_count);
81 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
82 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
83 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
84 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
85 					    bool active);
86 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
87 					    bool active);
88 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
89 				   u16 words, u16 *data);
90 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
91 				    u16 words, u16 *data);
92 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
93 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
94 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
95 					    u16 *data);
96 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
97 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
98 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
99 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
102 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
103 					   u16 *speed, u16 *duplex);
104 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
105 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
108 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
109 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
110 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
111 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
112 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
113 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
114 static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
115 static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
117 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
118 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
119 					  u32 offset, u8 *data);
120 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
121 					  u8 size, u16 *data);
122 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
123 					  u32 offset, u16 *data);
124 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
125 						 u32 offset, u8 byte);
126 static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
127 					   u32 offset, u8 data);
128 static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
129 					   u8 size, u16 data);
130 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
131 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
133 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
134 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
135 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138 #if defined(NAHUM6_HW) && (defined(LTR_SUPPORT) || defined(OBFF_SUPPORT))
139 
140 #endif /* NAHUM6_HW && (LTR_SUPPORT || OBFF_SUPPORT) */
141 
142 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
143 /* Offset 04h HSFSTS */
144 union ich8_hws_flash_status {
145 	struct ich8_hsfsts {
146 		u16 flcdone:1; /* bit 0 Flash Cycle Done */
147 		u16 flcerr:1; /* bit 1 Flash Cycle Error */
148 		u16 dael:1; /* bit 2 Direct Access error Log */
149 		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
150 		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
151 		u16 reserved1:2; /* bit 7:6 Reserved */
152 		u16 reserved2:6; /* bit 13:8 Reserved */
153 		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
154 		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
155 	} hsf_status;
156 	u16 regval;
157 };
158 
159 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
160 /* Offset 06h FLCTL */
161 union ich8_hws_flash_ctrl {
162 	struct ich8_hsflctl {
163 		u16 flcgo:1;   /* 0 Flash Cycle Go */
164 		u16 flcycle:2;   /* 2:1 Flash Cycle */
165 		u16 reserved:5;   /* 7:3 Reserved  */
166 		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
167 		u16 flockdn:6;   /* 15:10 Reserved */
168 	} hsf_ctrl;
169 	u16 regval;
170 };
171 
172 /* ICH Flash Region Access Permissions */
173 union ich8_hws_flash_regacc {
174 	struct ich8_flracc {
175 		u32 grra:8; /* 0:7 GbE region Read Access */
176 		u32 grwa:8; /* 8:15 GbE region Write Access */
177 		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
178 		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
179 	} hsf_flregacc;
180 	u32 regval;
181 };
182 
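/**
 *  e1000_toggle_lanphypc_value_ich8lan - toggle the LANPHYPC Value bit
 *  @hw: pointer to the HW structure
 *
 *  Drives the LANPHYPC Value bit low with the override bit set, then
 *  releases the override.  Used to force the MAC-PHY interconnect out of
 *  SMBus mode, e.g. after an Sx->S0 transition (see
 *  e1000_init_phy_params_pchlan).
 **/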
183 static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
184 {
185 	u32 ctrl;
186 
187 	DEBUGFUNC("e1000_toggle_lanphypc_value_ich8lan");
188 
189 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
190 	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
191 	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
192 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
193 	E1000_WRITE_FLUSH(hw);
194 	usec_delay(10);
195 	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
196 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
197 }
198 
199 /**
200  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
201  *  @hw: pointer to the HW structure
202  *
203  *  Initialize family-specific PHY parameters and function pointers.
204  **/
205 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
206 {
207 	struct e1000_phy_info *phy = &hw->phy;
208 	s32 ret_val = E1000_SUCCESS;
209 
210 	DEBUGFUNC("e1000_init_phy_params_pchlan");
211 
212 	phy->addr		= 1;
213 	phy->reset_delay_us	= 100;
214 
215 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
216 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
217 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
218 	phy->ops.set_page	= e1000_set_page_igp;
219 	phy->ops.read_reg	= e1000_read_phy_reg_hv;
220 	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
221 	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
222 	phy->ops.release	= e1000_release_swflag_ich8lan;
223 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
224 	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
225 	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
226 	phy->ops.write_reg	= e1000_write_phy_reg_hv;
227 	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
228 	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
229 	phy->ops.power_up	= e1000_power_up_phy_copper;
230 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
231 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
232 
233 	if (!hw->phy.ops.check_reset_block(hw)) {
234 		u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
235 
236 		/*
237 		 * The MAC-PHY interconnect may still be in SMBus mode after
238 		 * Sx->S0.  If resetting the PHY is not blocked, toggle the
239 		 * LANPHYPC Value bit to force the interconnect to PCIe mode.
240 		 */
241 		e1000_toggle_lanphypc_value_ich8lan(hw);
242 		msec_delay(50);
243 
244 		/*
245 		 * Gate automatic PHY configuration by hardware on
246 		 * non-managed 82579
247 		 */
248 		if ((hw->mac.type == e1000_pch2lan) &&
249 		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
250 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
251 
252 		/*
253 		 * Reset the PHY before any access to it.  Doing so ensures
254 		 * that the PHY is in a known good state before we read/write
255 		 * PHY registers.  The generic reset is sufficient here,
256 		 * because we haven't determined the PHY type yet.
257 		 */
258 		ret_val = e1000_phy_hw_reset_generic(hw);
259 		if (ret_val)
260 			goto out;
261 
262 		/* Ungate automatic PHY configuration on non-managed 82579 */
263 		if ((hw->mac.type == e1000_pch2lan) &&
264 		    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
265 			msec_delay(10);
266 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
267 		}
268 	}
269 
270 	phy->id = e1000_phy_unknown;
271 	switch (hw->mac.type) {
272 	default:
273 		ret_val = e1000_get_phy_id(hw);
274 		if (ret_val)
275 			goto out;
276 		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
277 			break;
278 		/* fall-through */
279 	case e1000_pch2lan:
280 		/*
281 		 * In case the PHY needs to be in mdio slow mode,
282 		 * set slow mode and try to get the PHY id again.
283 		 */
284 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
285 		if (ret_val)
286 			goto out;
287 		ret_val = e1000_get_phy_id(hw);
288 		if (ret_val)
289 			goto out;
290 		break;
291 	}
292 	phy->type = e1000_get_phy_type_from_id(phy->id);
293 
294 	switch (phy->type) {
295 	case e1000_phy_82577:
296 	case e1000_phy_82579:
297 		phy->ops.check_polarity = e1000_check_polarity_82577;
298 		phy->ops.force_speed_duplex =
299 			e1000_phy_force_speed_duplex_82577;
300 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
301 		phy->ops.get_info = e1000_get_phy_info_82577;
302 		phy->ops.commit = e1000_phy_sw_reset_generic;
303 		break;
304 	case e1000_phy_82578:
305 		phy->ops.check_polarity = e1000_check_polarity_m88;
306 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
307 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
308 		phy->ops.get_info = e1000_get_phy_info_m88;
309 		break;
310 	default:
311 		ret_val = -E1000_ERR_PHY;
312 		break;
313 	}
314 
315 out:
316 	return ret_val;
317 }
318 
319 /**
320  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
321  *  @hw: pointer to the HW structure
322  *
323  *  Initialize family-specific PHY parameters and function pointers.
324  **/
325 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
326 {
327 	struct e1000_phy_info *phy = &hw->phy;
328 	s32 ret_val = E1000_SUCCESS;
329 	u16 i = 0;
330 
331 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
332 
333 	phy->addr		= 1;
334 	phy->reset_delay_us	= 100;
335 
336 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
337 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
338 	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
339 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
340 	phy->ops.read_reg	= e1000_read_phy_reg_igp;
341 	phy->ops.release	= e1000_release_swflag_ich8lan;
342 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
343 	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
344 	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
345 	phy->ops.write_reg	= e1000_write_phy_reg_igp;
346 	phy->ops.power_up	= e1000_power_up_phy_copper;
347 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
348 
349 	/*
350 	 * We may need to do this twice - once for IGP and if that fails,
351 	 * we'll set BM func pointers and try again
352 	 */
353 	ret_val = e1000_determine_phy_address(hw);
354 	if (ret_val) {
355 		phy->ops.write_reg = e1000_write_phy_reg_bm;
356 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
357 		ret_val = e1000_determine_phy_address(hw);
358 		if (ret_val) {
359 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
360 			goto out;
361 		}
362 	}
363 
364 	phy->id = 0;
365 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
366 	       (i++ < 100)) {
367 		msec_delay(1);
368 		ret_val = e1000_get_phy_id(hw);
369 		if (ret_val)
370 			goto out;
371 	}
372 
373 	/* Verify phy id */
374 	switch (phy->id) {
375 	case IGP03E1000_E_PHY_ID:
376 		phy->type = e1000_phy_igp_3;
377 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
378 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
379 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
380 		phy->ops.get_info = e1000_get_phy_info_igp;
381 		phy->ops.check_polarity = e1000_check_polarity_igp;
382 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
383 		break;
384 	case IFE_E_PHY_ID:
385 	case IFE_PLUS_E_PHY_ID:
386 	case IFE_C_E_PHY_ID:
387 		phy->type = e1000_phy_ife;
388 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
389 		phy->ops.get_info = e1000_get_phy_info_ife;
390 		phy->ops.check_polarity = e1000_check_polarity_ife;
391 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
392 		break;
393 	case BME1000_E_PHY_ID:
394 		phy->type = e1000_phy_bm;
395 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
396 		phy->ops.read_reg = e1000_read_phy_reg_bm;
397 		phy->ops.write_reg = e1000_write_phy_reg_bm;
398 		phy->ops.commit = e1000_phy_sw_reset_generic;
399 		phy->ops.get_info = e1000_get_phy_info_m88;
400 		phy->ops.check_polarity = e1000_check_polarity_m88;
401 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
402 		break;
403 	default:
404 		ret_val = -E1000_ERR_PHY;
405 		goto out;
406 	}
407 
408 out:
409 	return ret_val;
410 }
411 
412 /**
413  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
414  *  @hw: pointer to the HW structure
415  *
416  *  Initialize family-specific NVM parameters and function
417  *  pointers.
418  **/
419 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
420 {
421 	struct e1000_nvm_info *nvm = &hw->nvm;
422 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
423 	u32 gfpreg, sector_base_addr, sector_end_addr;
424 	s32 ret_val = E1000_SUCCESS;
425 	u16 i;
426 
427 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
428 
429 	/* Can't read flash registers if the register set isn't mapped. */
430 	if (!hw->flash_address) {
431 		DEBUGOUT("ERROR: Flash registers not mapped\n");
432 		ret_val = -E1000_ERR_CONFIG;
433 		goto out;
434 	}
435 
436 	nvm->type = e1000_nvm_flash_sw;
437 
438 	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
439 
440 	/*
441 	 * sector_X_addr is a "sector"-aligned address (4096 bytes).
442 	 * Add 1 to sector_end_addr since this sector is included in
443 	 * the overall size.
444 	 */
445 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
446 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
447 
448 	/* flash_base_addr is byte-aligned */
449 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
450 
451 	/*
452 	 * find total size of the NVM, then cut in half since the total
453 	 * size represents two separate NVM banks.
454 	 */
455 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
456 				<< FLASH_SECTOR_ADDR_SHIFT;
457 	nvm->flash_bank_size /= 2;
458 	/* Adjust to word count */
459 	nvm->flash_bank_size /= sizeof(u16);
460 
461 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
462 
463 	/* Clear shadow ram */
464 	for (i = 0; i < nvm->word_size; i++) {
465 		dev_spec->shadow_ram[i].modified = FALSE;
466 		dev_spec->shadow_ram[i].value    = 0xFFFF;
467 	}
468 
469 	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
470 	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
471 
472 	/* Function Pointers */
473 	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
474 	nvm->ops.release	= e1000_release_nvm_ich8lan;
475 	nvm->ops.read		= e1000_read_nvm_ich8lan;
476 	nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
477 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
478 	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
479 	nvm->ops.write		= e1000_write_nvm_ich8lan;
480 
481 out:
482 	return ret_val;
483 }
484 
485 /**
486  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
487  *  @hw: pointer to the HW structure
488  *
489  *  Initialize family-specific MAC parameters and function
490  *  pointers.
491  **/
492 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
493 {
494 	struct e1000_mac_info *mac = &hw->mac;
495 
496 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
497 
498 	/* Set media type function pointer */
499 	hw->phy.media_type = e1000_media_type_copper;
500 
501 	/* Set mta register count */
502 	mac->mta_reg_count = 32;
503 	/* Set rar entry count */
504 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
505 	if (mac->type == e1000_ich8lan)
506 		mac->rar_entry_count--;
507 	/* Set if part includes ASF firmware */
508 	mac->asf_firmware_present = TRUE;
509 	/* FWSM register */
510 	mac->has_fwsm = TRUE;
511 	/* ARC subsystem not supported */
512 	mac->arc_subsystem_valid = FALSE;
513 	/* Adaptive IFS supported */
514 	mac->adaptive_ifs = TRUE;
515 
516 	/* Function pointers */
517 
518 	/* bus type/speed/width */
519 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
520 	/* function id */
521 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
522 	/* reset */
523 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
524 	/* hw initialization */
525 	mac->ops.init_hw = e1000_init_hw_ich8lan;
526 	/* link setup */
527 	mac->ops.setup_link = e1000_setup_link_ich8lan;
528 	/* physical interface setup */
529 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
530 	/* check for link */
531 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
532 	/* link info */
533 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
534 	/* multicast address update */
535 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
536 	/* clear hardware counters */
537 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
538 
539 	/* LED operations */
540 	switch (mac->type) {
541 	case e1000_ich8lan:
542 	case e1000_ich9lan:
543 	case e1000_ich10lan:
544 		/* check management mode */
545 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
546 		/* ID LED init */
547 		mac->ops.id_led_init = e1000_id_led_init_generic;
548 		/* blink LED */
549 		mac->ops.blink_led = e1000_blink_led_generic;
550 		/* setup LED */
551 		mac->ops.setup_led = e1000_setup_led_generic;
552 		/* cleanup LED */
553 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
554 		/* turn on/off LED */
555 		mac->ops.led_on = e1000_led_on_ich8lan;
556 		mac->ops.led_off = e1000_led_off_ich8lan;
557 		break;
558 	case e1000_pch2lan:
559 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
560 		mac->ops.rar_set = e1000_rar_set_pch2lan;
561 		/* multicast address update for pch2 */
562 		mac->ops.update_mc_addr_list =
563 			e1000_update_mc_addr_list_pch2lan;
564 		/* fall-through */
565 	case e1000_pchlan:
566 		/* check management mode */
567 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
568 		/* ID LED init */
569 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
570 		/* setup LED */
571 		mac->ops.setup_led = e1000_setup_led_pchlan;
572 		/* cleanup LED */
573 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
574 		/* turn on/off LED */
575 		mac->ops.led_on = e1000_led_on_pchlan;
576 		mac->ops.led_off = e1000_led_off_pchlan;
577 		break;
578 	default:
579 		break;
580 	}
581 
582 #if defined(NAHUM6_HW) && (defined(LTR_SUPPORT) || defined(OBFF_SUPPORT))
583 	if (mac->type == e1000_pch_lpt) {
584 	}
585 
586 #endif /* NAHUM6_HW && (LTR_SUPPORT || OBFF_SUPPORT) */
587 	/* Enable PCS Lock-loss workaround for ICH8 */
588 	if (mac->type == e1000_ich8lan)
589 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
590 
591 	/* Gate automatic PHY configuration by hardware on managed 82579 */
592 	if ((mac->type == e1000_pch2lan) &&
593 	    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
594 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
595 
596 	return E1000_SUCCESS;
597 }
598 
599 /**
600  *  e1000_set_eee_pchlan - Enable/disable EEE support
601  *  @hw: pointer to the HW structure
602  *
603  *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
604  *  the LPI Control register will remain set only if/when link is up.
605  **/
606 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
607 {
608 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
609 	s32 ret_val = E1000_SUCCESS;
610 	u16 phy_reg;
611 
612 	DEBUGFUNC("e1000_set_eee_pchlan");
613 
614 	if (hw->phy.type != e1000_phy_82579)
615 		goto out;
616 
617 	ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
618 	if (ret_val)
619 		goto out;
620 
621 	if (dev_spec->eee_disable)
622 		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
623 	else
624 		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
625 
626 	ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
627 out:
628 	return ret_val;
629 }
630 
631 /**
632  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
633  *  @hw: pointer to the HW structure
634  *
635  *  Checks to see if the link status of the hardware has changed.  If a
636  *  change in link status has been detected, then we read the PHY registers
637  *  to get the current speed/duplex if link exists.
638  **/
639 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
640 {
641 	struct e1000_mac_info *mac = &hw->mac;
642 	s32 ret_val;
643 	bool link;
644 	u16 phy_reg;
645 
646 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
647 
648 	/*
649 	 * We only want to go out to the PHY registers to see if Auto-Neg
650 	 * has completed and/or if our link status has changed.  The
651 	 * get_link_status flag is set upon receiving a Link Status
652 	 * Change or Rx Sequence Error interrupt.
653 	 */
654 	if (!mac->get_link_status) {
655 		ret_val = E1000_SUCCESS;
656 		goto out;
657 	}
658 
659 	/*
660 	 * First we want to see if the MII Status Register reports
661 	 * link.  If so, then we want to get the current speed/duplex
662 	 * of the PHY.
663 	 */
664 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
665 	if (ret_val)
666 		goto out;
667 
668 	if (hw->mac.type == e1000_pchlan) {
669 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
670 		if (ret_val)
671 			goto out;
672 	}
673 
674 #if defined(NAHUM6_HW) && (defined(LTR_SUPPORT) || defined(OBFF_SUPPORT))
675 	if (hw->mac.type == e1000_pch_lpt) {
676 	}
677 
678 #endif /* NAHUM6_HW && (LTR_SUPPORT || OBFF_SUPPORT) */
679 	if (!link)
680 		goto out; /* No link detected */
681 
682 	mac->get_link_status = FALSE;
683 
684 	switch (hw->mac.type) {
685 	case e1000_pch2lan:
686 		ret_val = e1000_k1_workaround_lv(hw);
687 		if (ret_val)
688 			goto out;
689 		/* fall-through */
690 	case e1000_pchlan:
691 		if (hw->phy.type == e1000_phy_82578) {
692 			ret_val = e1000_link_stall_workaround_hv(hw);
693 			if (ret_val)
694 				goto out;
695 		}
696 
697 		/*
698 		 * Workaround for PCHx parts in half-duplex:
699 		 * Set the number of preambles removed from the packet
700 		 * when it is passed from the PHY to the MAC to prevent
701 		 * the MAC from misinterpreting the packet type.
702 		 */
703 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
704 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
705 
706 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
707 		    E1000_STATUS_FD)
708 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
709 
710 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
711 		break;
712 	default:
713 		break;
714 	}
715 
716 	/*
717 	 * Check if there was DownShift; this must be checked
718 	 * immediately after link-up
719 	 */
720 	e1000_check_downshift_generic(hw);
721 
722 	/* Enable/Disable EEE after link up */
723 	ret_val = e1000_set_eee_pchlan(hw);
724 	if (ret_val)
725 		goto out;
726 
727 	/*
728 	 * If we are forcing speed/duplex, then we simply return since
729 	 * we have already determined whether we have link or not.
730 	 */
731 	if (!mac->autoneg) {
732 		ret_val = -E1000_ERR_CONFIG;
733 		goto out;
734 	}
735 
736 	/*
737 	 * Auto-Neg is enabled.  Auto Speed Detection takes care
738 	 * of MAC speed/duplex configuration.  So we only need to
739 	 * configure Collision Distance in the MAC.
740 	 */
741 	e1000_config_collision_dist_generic(hw);
742 
743 	/*
744 	 * Configure Flow Control now that Auto-Neg has completed.
745 	 * First, we need to restore the desired flow control
746 	 * settings because we may have had to re-autoneg with a
747 	 * different link partner.
748 	 */
749 	ret_val = e1000_config_fc_after_link_up_generic(hw);
750 	if (ret_val)
751 		DEBUGOUT("Error configuring flow control\n");
752 
753 out:
754 	return ret_val;
755 }
756 
757 /**
758  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
759  *  @hw: pointer to the HW structure
760  *
761  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
762  **/
763 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
764 {
765 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
766 
767 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
768 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
769 	switch (hw->mac.type) {
770 	case e1000_ich8lan:
771 	case e1000_ich9lan:
772 	case e1000_ich10lan:
773 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
774 		break;
775 	case e1000_pchlan:
776 	case e1000_pch2lan:
777 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
778 		break;
779 	default:
780 		break;
781 	}
782 }
783 
784 /**
785  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
786  *  @hw: pointer to the HW structure
787  *
788  *  Acquires the mutex for performing NVM operations.
789  **/
790 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
791 {
792 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
793 
794 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
795 
796 	return E1000_SUCCESS;
797 }
798 
799 /**
800  *  e1000_release_nvm_ich8lan - Release NVM mutex
801  *  @hw: pointer to the HW structure
802  *
803  *  Releases the mutex used while performing NVM operations.
804  **/
805 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
806 {
807 	DEBUGFUNC("e1000_release_nvm_ich8lan");
808 
809 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
810 
811 	return;
812 }
813 
814 /**
815  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
816  *  @hw: pointer to the HW structure
817  *
818  *  Acquires the software control flag for performing PHY and select
819  *  MAC CSR accesses.
820  **/
821 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
822 {
823 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
824 	s32 ret_val = E1000_SUCCESS;
825 
826 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
827 
828 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
829 
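	/* Wait for any prior software owner to release the flag */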
830 	while (timeout) {
831 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
832 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
833 			break;
834 
835 		msec_delay_irq(1);
836 		timeout--;
837 	}
838 
839 	if (!timeout) {
840 		DEBUGOUT("SW has already locked the resource.\n");
841 		ret_val = -E1000_ERR_CONFIG;
842 		goto out;
843 	}
844 
845 	timeout = SW_FLAG_TIMEOUT;
846 
847 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
848 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
849 
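	/* Poll until the flag reads back set; FW or HW ownership can block it */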
850 	while (timeout) {
851 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
852 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
853 			break;
854 
855 		msec_delay_irq(1);
856 		timeout--;
857 	}
858 
859 	if (!timeout) {
860 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
861 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
862 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
863 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
864 		ret_val = -E1000_ERR_CONFIG;
865 		goto out;
866 	}
867 
868 out:
869 	if (ret_val)
870 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
871 
872 	return ret_val;
873 }
874 
875 /**
876  *  e1000_release_swflag_ich8lan - Release software control flag
877  *  @hw: pointer to the HW structure
878  *
879  *  Releases the software control flag for performing PHY and select
880  *  MAC CSR accesses.
881  **/
882 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
883 {
884 	u32 extcnf_ctrl;
885 
886 	DEBUGFUNC("e1000_release_swflag_ich8lan");
887 
888 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
889 
890 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
891 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
892 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
893 	} else {
894 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
895 	}
896 
897 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
898 
899 	return;
900 }
901 
902 /**
903  *  e1000_check_mng_mode_ich8lan - Checks management mode
904  *  @hw: pointer to the HW structure
905  *
906  *  This checks if the adapter has any manageability enabled.
907  *  This is a function pointer entry point only called by read/write
908  *  routines for the PHY and NVM parts.
909  **/
910 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
911 {
912 	u32 fwsm;
913 
914 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
915 
916 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
917 
918 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
919 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
920 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
921 }
922 
923 /**
924  *  e1000_check_mng_mode_pchlan - Checks management mode
925  *  @hw: pointer to the HW structure
926  *
927  *  This checks if the adapter has iAMT enabled.
928  *  This is a function pointer entry point only called by read/write
929  *  routines for the PHY and NVM parts.
930  **/
931 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
932 {
933 	u32 fwsm;
934 
935 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
936 
937 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
938 
939 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
940 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
941 }
942 
943 /**
944  *  e1000_rar_set_pch2lan - Set receive address register
945  *  @hw: pointer to the HW structure
946  *  @addr: pointer to the receive address
947  *  @index: receive address array register
948  *
949  *  Sets the receive address array register at index to the address passed
950  *  in by addr.  For 82579, RAR[0] is the base address register that is to
951  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
952  *  Use SHRA[0-3] in place of those reserved for ME.
953  **/
954 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
955 {
956 	u32 rar_low, rar_high;
957 
958 	DEBUGFUNC("e1000_rar_set_pch2lan");
959 
960 	/*
961 	 * HW expects these in little endian so we reverse the byte order
962 	 * from network order (big endian) to little endian
963 	 */
964 	rar_low = ((u32) addr[0] |
965 		   ((u32) addr[1] << 8) |
966 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
967 
968 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
969 
970 	/* If MAC address zero, no need to set the AV bit */
971 	if (rar_low || rar_high)
972 		rar_high |= E1000_RAH_AV;
973 
974 	if (index == 0) {
975 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
976 		E1000_WRITE_FLUSH(hw);
977 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
978 		E1000_WRITE_FLUSH(hw);
979 		return;
980 	}
981 
982 	if (index < hw->mac.rar_entry_count) {
983 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
984 		E1000_WRITE_FLUSH(hw);
985 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
986 		E1000_WRITE_FLUSH(hw);
987 
988 		/* verify the register updates */
989 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
990 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
991 			return;
992 
993 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
994 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
995 	}
996 
997 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
998 }
999 
1000 /**
1001  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1002  *  @hw: pointer to the HW structure
1003  *  @mc_addr_list: array of multicast addresses to program
1004  *  @mc_addr_count: number of multicast addresses to program
1005  *
1006  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1007  *  The caller must have a packed mc_addr_list of multicast addresses.
1008  **/
1009 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1010 					      u8 *mc_addr_list,
1011 					      u32 mc_addr_count)
1012 {
1013 	u16 phy_reg = 0;
1014 	int i;
1015 	s32 ret_val;
1016 
1017 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1018 
1019 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1020 
1021 	ret_val = hw->phy.ops.acquire(hw);
1022 	if (ret_val)
1023 		return;
1024 
1025 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1026 	if (ret_val)
1027 		goto release;
1028 
1029 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1030 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1031 					   (u16)(hw->mac.mta_shadow[i] &
1032 						 0xFFFF));
1033 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1034 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
1035 						 0xFFFF));
1036 	}
1037 
1038 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1039 
1040 release:
1041 	hw->phy.ops.release(hw);
1042 }
1043 
1044 /**
1045  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1046  *  @hw: pointer to the HW structure
1047  *
1048  *  Checks if firmware is blocking the reset of the PHY.
1049  *  This is a function pointer entry point only called by
1050  *  reset routines.
1051  **/
1052 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1053 {
1054 	u32 fwsm;
1055 
1056 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1057 
1058 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1059 
1060 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1061 						: E1000_BLK_PHY_RESET;
1062 }
1063 
1064 /**
1065  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1066  *  @hw: pointer to the HW structure
1067  *
1068  *  Assumes semaphore already acquired.
1069  *
1070  **/
1071 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1072 {
1073 	u16 phy_data;
1074 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1075 	s32 ret_val = E1000_SUCCESS;
1076 
1077 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1078 
1079 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1080 	if (ret_val)
1081 		goto out;
1082 
1083 	phy_data &= ~HV_SMB_ADDR_MASK;
1084 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1085 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1086 	ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1087 
1088 out:
1089 	return ret_val;
1090 }
1091 
1092 /**
1093  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1094  *  @hw:   pointer to the HW structure
1095  *
1096  *  SW should configure the LCD from the NVM extended configuration region
1097  *  as a workaround for certain parts.
1098  **/
1099 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1100 {
1101 	struct e1000_phy_info *phy = &hw->phy;
1102 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1103 	s32 ret_val = E1000_SUCCESS;
1104 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1105 
1106 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1107 
1108 	/*
1109 	 * Initialize the PHY from the NVM on ICH platforms.  This
1110 	 * is needed due to an issue where the NVM configuration is
1111 	 * not properly autoloaded after power transitions.
1112 	 * Therefore, after each PHY reset, we will load the
1113 	 * configuration data out of the NVM manually.
1114 	 */
1115 	switch (hw->mac.type) {
1116 	case e1000_ich8lan:
1117 		if (phy->type != e1000_phy_igp_3)
1118 			return ret_val;
1119 
1120 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1121 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1122 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1123 			break;
1124 		}
1125 		/* Fall-through */
1126 	case e1000_pchlan:
1127 	case e1000_pch2lan:
1128 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1129 		break;
1130 	default:
1131 		return ret_val;
1132 	}
1133 
1134 	ret_val = hw->phy.ops.acquire(hw);
1135 	if (ret_val)
1136 		return ret_val;
1137 
1138 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1139 	if (!(data & sw_cfg_mask))
1140 		goto out;
1141 
1142 	/*
1143 	 * Make sure HW does not configure LCD from PHY
1144 	 * extended configuration before SW configuration
1145 	 */
1146 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1147 	if (!(hw->mac.type == e1000_pch2lan)) {
1148 		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1149 			goto out;
1150 	}
1151 
1152 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1153 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1154 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1155 	if (!cnf_size)
1156 		goto out;
1157 
1158 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1159 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1160 
1161 	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1162 	    (hw->mac.type == e1000_pchlan)) ||
1163 	     (hw->mac.type == e1000_pch2lan)) {
1164 		/*
1165 		 * HW configures the SMBus address and LEDs when the
1166 		 * OEM and LCD Write Enable bits are set in the NVM.
1167 		 * When both NVM bits are cleared, SW will configure
1168 		 * them instead.
1169 		 */
1170 		ret_val = e1000_write_smbus_addr(hw);
1171 		if (ret_val)
1172 			goto out;
1173 
1174 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1175 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1176 							(u16)data);
1177 		if (ret_val)
1178 			goto out;
1179 	}
1180 
1181 	/* Configure LCD from extended configuration region. */
1182 
1183 	/* cnf_base_addr is in DWORD */
1184 	word_addr = (u16)(cnf_base_addr << 1);
1185 
1186 	for (i = 0; i < cnf_size; i++) {
1187 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1188 					   &reg_data);
1189 		if (ret_val)
1190 			goto out;
1191 
1192 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1193 					   1, &reg_addr);
1194 		if (ret_val)
1195 			goto out;
1196 
1197 		/* Save off the PHY page for future writes. */
1198 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1199 			phy_page = reg_data;
1200 			continue;
1201 		}
1202 
1203 		reg_addr &= PHY_REG_MASK;
1204 		reg_addr |= phy_page;
1205 
1206 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1207 						    reg_data);
1208 		if (ret_val)
1209 			goto out;
1210 	}
1211 
1212 out:
1213 	hw->phy.ops.release(hw);
1214 	return ret_val;
1215 }
1216 
1217 /**
1218  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1219  *  @hw:   pointer to the HW structure
1220  *  @link: link up bool flag
1221  *
1222  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1223  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1224  *  If link is down, the function will restore the default K1 setting located
1225  *  in the NVM.
1226  **/
1227 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1228 {
1229 	s32 ret_val = E1000_SUCCESS;
1230 	u16 status_reg = 0;
1231 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1232 
1233 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1234 
1235 	if (hw->mac.type != e1000_pchlan)
1236 		goto out;
1237 
1238 	/* Wrap the whole flow with the sw flag */
1239 	ret_val = hw->phy.ops.acquire(hw);
1240 	if (ret_val)
1241 		goto out;
1242 
1243 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1244 	if (link) {
1245 		if (hw->phy.type == e1000_phy_82578) {
1246 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1247 							      &status_reg);
1248 			if (ret_val)
1249 				goto release;
1250 
1251 			status_reg &= BM_CS_STATUS_LINK_UP |
1252 				      BM_CS_STATUS_RESOLVED |
1253 				      BM_CS_STATUS_SPEED_MASK;
1254 
1255 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1256 					   BM_CS_STATUS_RESOLVED |
1257 					   BM_CS_STATUS_SPEED_1000))
1258 				k1_enable = FALSE;
1259 		}
1260 
1261 		if (hw->phy.type == e1000_phy_82577) {
1262 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1263 							      &status_reg);
1264 			if (ret_val)
1265 				goto release;
1266 
1267 			status_reg &= HV_M_STATUS_LINK_UP |
1268 				      HV_M_STATUS_AUTONEG_COMPLETE |
1269 				      HV_M_STATUS_SPEED_MASK;
1270 
1271 			if (status_reg == (HV_M_STATUS_LINK_UP |
1272 					   HV_M_STATUS_AUTONEG_COMPLETE |
1273 					   HV_M_STATUS_SPEED_1000))
1274 				k1_enable = FALSE;
1275 		}
1276 
1277 		/* Link stall fix for link up */
1278 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1279 						       0x0100);
1280 		if (ret_val)
1281 			goto release;
1282 
1283 	} else {
1284 		/* Link stall fix for link down */
1285 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1286 						       0x4100);
1287 		if (ret_val)
1288 			goto release;
1289 	}
1290 
1291 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1292 
1293 release:
1294 	hw->phy.ops.release(hw);
1295 out:
1296 	return ret_val;
1297 }
1298 
1299 /**
1300  *  e1000_configure_k1_ich8lan - Configure K1 power state
1301  *  @hw: pointer to the HW structure
1302  *  @k1_enable: K1 state to configure
1303  *
1304  *  Configure the K1 power state based on the provided parameter.
1305  *  Assumes semaphore already acquired.
1306  *
1307  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1308  **/
1309 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1310 {
1311 	s32 ret_val = E1000_SUCCESS;
1312 	u32 ctrl_reg = 0;
1313 	u32 ctrl_ext = 0;
1314 	u32 reg = 0;
1315 	u16 kmrn_reg = 0;
1316 
1317 	DEBUGFUNC("e1000_configure_k1_ich8lan");
1318 
1319 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1320 					     &kmrn_reg);
1321 	if (ret_val)
1322 		goto out;
1323 
1324 	if (k1_enable)
1325 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1326 	else
1327 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1328 
1329 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1330 					      kmrn_reg);
1331 	if (ret_val)
1332 		goto out;
1333 
1334 	usec_delay(20);
1335 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1336 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1337 
1338 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
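	/*
	 * Temporarily force the MAC speed setting (FRCSPD with speed-select
	 * bypass), then restore the original CTRL and CTRL_EXT values below.
	 */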
1339 	reg |= E1000_CTRL_FRCSPD;
1340 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1341 
1342 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1343 	E1000_WRITE_FLUSH(hw);
1344 	usec_delay(20);
1345 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1346 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1347 	E1000_WRITE_FLUSH(hw);
1348 	usec_delay(20);
1349 
1350 out:
1351 	return ret_val;
1352 }
1353 
1354 /**
1355  *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
1356  *  @hw:       pointer to the HW structure
1357  *  @d0_state: boolean if entering d0 or d3 device state
1358  *
1359  *  SW will configure GbE Disable and LPLU based on the NVM. The four bits are
1360  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1361  *  in NVM determine whether HW should configure LPLU and GbE Disable.
1362  **/
1363 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1364 {
1365 	s32 ret_val = 0;
1366 	u32 mac_reg;
1367 	u16 oem_reg;
1368 
1369 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1370 
1371 	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1372 		return ret_val;
1373 
1374 	ret_val = hw->phy.ops.acquire(hw);
1375 	if (ret_val)
1376 		return ret_val;
1377 
1378 	if (!(hw->mac.type == e1000_pch2lan)) {
1379 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1380 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1381 			goto out;
1382 	}
1383 
1384 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1385 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1386 		goto out;
1387 
1388 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1389 
1390 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1391 	if (ret_val)
1392 		goto out;
1393 
1394 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1395 
1396 	if (d0_state) {
1397 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1398 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1399 
1400 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1401 			oem_reg |= HV_OEM_BITS_LPLU;
1402 
1403 		/* Set Restart auto-neg to activate the bits */
1404 		if (!hw->phy.ops.check_reset_block(hw))
1405 			oem_reg |= HV_OEM_BITS_RESTART_AN;
1406 	} else {
1407 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1408 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1409 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1410 
1411 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1412 		    E1000_PHY_CTRL_NOND0A_LPLU))
1413 			oem_reg |= HV_OEM_BITS_LPLU;
1414 	}
1415 
1416 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1417 
1418 out:
1419 	hw->phy.ops.release(hw);
1420 
1421 	return ret_val;
1422 }
1423 
1424 
1425 /**
1426  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1427  *  @hw:   pointer to the HW structure
1428  **/
1429 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1430 {
1431 	s32 ret_val;
1432 	u16 data;
1433 
1434 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1435 
1436 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1437 	if (ret_val)
1438 		return ret_val;
1439 
1440 	data |= HV_KMRN_MDIO_SLOW;
1441 
1442 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1443 
1444 	return ret_val;
1445 }
1446 
1447 /**
1448  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1449  *  done after every PHY reset.
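 *  @hw: pointer to the HW structure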
1450  **/
1451 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1452 {
1453 	s32 ret_val = E1000_SUCCESS;
1454 	u16 phy_data;
1455 
1456 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1457 
1458 	if (hw->mac.type != e1000_pchlan)
1459 		goto out;
1460 
1461 	/* Set MDIO slow mode before any other MDIO access */
1462 	if (hw->phy.type == e1000_phy_82577) {
1463 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
1464 		if (ret_val)
1465 			goto out;
1466 	}
1467 
1468 	if (((hw->phy.type == e1000_phy_82577) &&
1469 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1470 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1471 		/* Disable generation of early preamble */
1472 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1473 		if (ret_val)
1474 			goto out;
1475 
1476 		/* Preamble tuning for SSC */
1477 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
1478 						0xA204);
1479 		if (ret_val)
1480 			goto out;
1481 	}
1482 
1483 	if (hw->phy.type == e1000_phy_82578) {
1484 		/*
1485 		 * Return registers to default by doing a soft reset then
1486 		 * writing 0x3140 to the control register.
1487 		 */
1488 		if (hw->phy.revision < 2) {
1489 			e1000_phy_sw_reset_generic(hw);
1490 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1491 							0x3140);
1492 		}
1493 	}
1494 
1495 	/* Select page 0 */
1496 	ret_val = hw->phy.ops.acquire(hw);
1497 	if (ret_val)
1498 		goto out;
1499 
1500 	hw->phy.addr = 1;
1501 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1502 	hw->phy.ops.release(hw);
1503 	if (ret_val)
1504 		goto out;
1505 
1506 	/*
1507 	 * Configure the K1 Si workaround during PHY reset assuming there is
1508 	 * link so that it disables K1 if link is at 1Gbps.
1509 	 */
1510 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1511 	if (ret_val)
1512 		goto out;
1513 
1514 	/* Workaround for link disconnects on a busy hub in half duplex */
1515 	ret_val = hw->phy.ops.acquire(hw);
1516 	if (ret_val)
1517 		goto out;
1518 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1519 	if (ret_val)
1520 		goto release;
1521 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1522 					       phy_data & 0x00FF);
1523 release:
1524 	hw->phy.ops.release(hw);
1525 out:
1526 	return ret_val;
1527 }
1528 
1529 /**
1530  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1531  *  @hw:   pointer to the HW structure
1532  **/
1533 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1534 {
1535 	u32 mac_reg;
1536 	u16 i, phy_reg = 0;
1537 	s32 ret_val;
1538 
1539 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1540 
1541 	ret_val = hw->phy.ops.acquire(hw);
1542 	if (ret_val)
1543 		return;
1544 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1545 	if (ret_val)
1546 		goto release;
1547 
1548 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1549 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1550 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1551 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1552 					   (u16)(mac_reg & 0xFFFF));
1553 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1554 					   (u16)((mac_reg >> 16) & 0xFFFF));
1555 
1556 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1557 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1558 					   (u16)(mac_reg & 0xFFFF));
1559 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1560 					   (u16)((mac_reg & E1000_RAH_AV)
1561 						 >> 16));
1562 	}
1563 
1564 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1565 
1566 release:
1567 	hw->phy.ops.release(hw);
1568 }
1569 
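/**
 *  e1000_calc_rx_da_crc - Calculate the CRC-32 of a receive address
 *  @mac: pointer to the 6-byte destination (MAC) address
 *
 *  Computes the IEEE 802.3 CRC-32 of a receive address.  Used by the 82579
 *  jumbo frame workaround to program the initial CRC values (PCH_RAICC).
 **/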
1570 static u32 e1000_calc_rx_da_crc(u8 mac[])
1571 {
1572 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
1573 	u32 i, j, mask, crc;
1574 
1575 	DEBUGFUNC("e1000_calc_rx_da_crc");
1576 
1577 	crc = 0xffffffff;
1578 	for (i = 0; i < 6; i++) {
1579 		crc = crc ^ mac[i];
1580 		for (j = 8; j > 0; j--) {
1581 			mask = (crc & 1) * (-1);
1582 			crc = (crc >> 1) ^ (poly & mask);
1583 		}
1584 	}
1585 	return ~crc;
1586 }
1587 
1588 /**
1589  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1590  *  with 82579 PHY
1591  *  @hw: pointer to the HW structure
1592  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
1593  **/
1594 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1595 {
1596 	s32 ret_val = E1000_SUCCESS;
1597 	u16 phy_reg, data;
1598 	u32 mac_reg;
1599 	u16 i;
1600 
1601 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1602 
1603 	if (hw->mac.type != e1000_pch2lan)
1604 		goto out;
1605 
1606 	/* disable Rx path while enabling/disabling workaround */
1607 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1608 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1609 					phy_reg | (1 << 14));
1610 	if (ret_val)
1611 		goto out;
1612 
1613 	if (enable) {
1614 		/*
1615 		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1616 		 * SHRAL/H) and initial CRC values to the MAC
1617 		 */
1618 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1619 			u8 mac_addr[ETH_ADDR_LEN] = {0};
1620 			u32 addr_high, addr_low;
1621 
1622 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1623 			if (!(addr_high & E1000_RAH_AV))
1624 				continue;
1625 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1626 			mac_addr[0] = (addr_low & 0xFF);
1627 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1628 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1629 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1630 			mac_addr[4] = (addr_high & 0xFF);
1631 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1632 
1633 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1634 					e1000_calc_rx_da_crc(mac_addr));
1635 		}
1636 
1637 		/* Write Rx addresses to the PHY */
1638 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1639 
1640 		/* Enable jumbo frame workaround in the MAC */
1641 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1642 		mac_reg &= ~(1 << 14);
1643 		mac_reg |= (7 << 15);
1644 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1645 
1646 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1647 		mac_reg |= E1000_RCTL_SECRC;
1648 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1649 
1650 		ret_val = e1000_read_kmrn_reg_generic(hw,
1651 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1652 						&data);
1653 		if (ret_val)
1654 			goto out;
1655 		ret_val = e1000_write_kmrn_reg_generic(hw,
1656 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1657 						data | (1 << 0));
1658 		if (ret_val)
1659 			goto out;
1660 		ret_val = e1000_read_kmrn_reg_generic(hw,
1661 						E1000_KMRNCTRLSTA_HD_CTRL,
1662 						&data);
1663 		if (ret_val)
1664 			goto out;
1665 		data &= ~(0xF << 8);
1666 		data |= (0xB << 8);
1667 		ret_val = e1000_write_kmrn_reg_generic(hw,
1668 						E1000_KMRNCTRLSTA_HD_CTRL,
1669 						data);
1670 		if (ret_val)
1671 			goto out;
1672 
1673 		/* Enable jumbo frame workaround in the PHY */
1674 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1675 		data &= ~(0x7F << 5);
1676 		data |= (0x37 << 5);
1677 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1678 		if (ret_val)
1679 			goto out;
1680 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1681 		data &= ~(1 << 13);
1682 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1683 		if (ret_val)
1684 			goto out;
1685 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1686 		data &= ~(0x3FF << 2);
1687 		data |= (0x1A << 2);
1688 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1689 		if (ret_val)
1690 			goto out;
1691 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
1692 		if (ret_val)
1693 			goto out;
1694 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1695 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
1696 						(1 << 10));
1697 		if (ret_val)
1698 			goto out;
1699 	} else {
1700 		/* Write MAC register values back to h/w defaults */
1701 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1702 		mac_reg &= ~(0xF << 14);
1703 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1704 
1705 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1706 		mac_reg &= ~E1000_RCTL_SECRC;
1707 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1708 
1709 		ret_val = e1000_read_kmrn_reg_generic(hw,
1710 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1711 						&data);
1712 		if (ret_val)
1713 			goto out;
1714 		ret_val = e1000_write_kmrn_reg_generic(hw,
1715 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1716 						data & ~(1 << 0));
1717 		if (ret_val)
1718 			goto out;
1719 		ret_val = e1000_read_kmrn_reg_generic(hw,
1720 						E1000_KMRNCTRLSTA_HD_CTRL,
1721 						&data);
1722 		if (ret_val)
1723 			goto out;
1724 		data &= ~(0xF << 8);
1725 		data |= (0xB << 8);
1726 		ret_val = e1000_write_kmrn_reg_generic(hw,
1727 						E1000_KMRNCTRLSTA_HD_CTRL,
1728 						data);
1729 		if (ret_val)
1730 			goto out;
1731 
1732 		/* Write PHY register values back to h/w defaults */
1733 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1734 		data &= ~(0x7F << 5);
1735 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1736 		if (ret_val)
1737 			goto out;
1738 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1739 		data |= (1 << 13);
1740 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1741 		if (ret_val)
1742 			goto out;
1743 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1744 		data &= ~(0x3FF << 2);
1745 		data |= (0x8 << 2);
1746 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1747 		if (ret_val)
1748 			goto out;
1749 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1750 		if (ret_val)
1751 			goto out;
1752 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1753 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
1754 						~(1 << 10));
1755 		if (ret_val)
1756 			goto out;
1757 	}
1758 
1759 	/* re-enable Rx path after enabling/disabling workaround */
1760 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
1761 					~(1 << 14));
1762 
1763 out:
1764 	return ret_val;
1765 }
1766 
1767 /**
1768  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1769  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
1770  **/
1771 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1772 {
1773 	s32 ret_val = E1000_SUCCESS;
1774 
1775 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1776 
1777 	if (hw->mac.type != e1000_pch2lan)
1778 		goto out;
1779 
1780 	/* Set MDIO slow mode before any other MDIO access */
1781 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	if (ret_val)
		goto out;
1782 
1783 	ret_val = hw->phy.ops.acquire(hw);
1784 	if (ret_val)
1785 		goto out;
1786 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1787 					       I82579_MSE_THRESHOLD);
1788 	if (ret_val)
1789 		goto release;
1790 	/* set the MSE threshold higher so the link stays up when noise is high */
1791 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1792 					       0x0034);
1793 	if (ret_val)
1794 		goto release;
1795 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1796 					       I82579_MSE_LINK_DOWN);
1797 	if (ret_val)
1798 		goto release;
1799 	/* drop the link after the MSE threshold has been reached 5 times */
1800 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1801 					       0x0005);
1802 release:
1803 	hw->phy.ops.release(hw);
1804 
1805 out:
1806 	return ret_val;
1807 }
1808 
1809 /**
1810  *  e1000_k1_workaround_lv - K1 Si workaround
1811  *  @hw:   pointer to the HW structure
1812  *
1813  *  Workaround to set the K1 beacon duration for 82579 parts
1814  **/
1815 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1816 {
1817 	s32 ret_val = E1000_SUCCESS;
1818 	u16 status_reg = 0;
1819 	u32 mac_reg;
1820 	u16 phy_reg;
1821 
1822 	DEBUGFUNC("e1000_k1_workaround_lv");
1823 
1824 	if (hw->mac.type != e1000_pch2lan)
1825 		goto out;
1826 
1827 	/* Set K1 beacon duration based on whether the link is at 1Gbps */
1828 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1829 	if (ret_val)
1830 		goto out;
1831 
1832 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1833 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1834 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1835 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1836 
1837 		ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
1838 		if (ret_val)
1839 			goto out;
1840 
1841 		if (status_reg & HV_M_STATUS_SPEED_1000) {
1842 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1843 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1844 		} else {
1845 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1846 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1847 		}
1848 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1849 		ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
1850 	}
1851 
1852 out:
1853 	return ret_val;
1854 }
1855 
1856 /**
1857  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1858  *  @hw:   pointer to the HW structure
1859  *  @gate: boolean set to TRUE to gate, FALSE to ungate
1860  *
1861  *  Gate/ungate the automatic PHY configuration via hardware; perform
1862  *  the configuration via software instead.
1863  **/
1864 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1865 {
1866 	u32 extcnf_ctrl;
1867 
1868 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1869 
1870 	if (hw->mac.type != e1000_pch2lan)
1871 		return;
1872 
1873 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1874 
1875 	if (gate)
1876 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1877 	else
1878 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1879 
1880 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1881 	return;
1882 }
1883 
1884 /**
1885  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
1886  *  @hw: pointer to the HW structure
1887  *
1888  *  Check the appropriate indication the MAC has finished configuring the
1889  *  PHY after a software reset.
1890  **/
1891 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1892 {
1893 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1894 
1895 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
1896 
1897 	/* Wait for basic configuration to complete before proceeding */
1898 	do {
1899 		data = E1000_READ_REG(hw, E1000_STATUS);
1900 		data &= E1000_STATUS_LAN_INIT_DONE;
1901 		usec_delay(100);
1902 	} while ((!data) && --loop);
1903 
1904 	/*
1905 	 * If basic configuration is incomplete before the above loop
1906 	 * count reaches 0, loading the configuration from NVM will
1907 	 * leave the PHY in a bad state possibly resulting in no link.
1908 	 */
1909 	if (loop == 0)
1910 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1911 
1912 	/* Clear the Init Done bit for the next init event */
1913 	data = E1000_READ_REG(hw, E1000_STATUS);
1914 	data &= ~E1000_STATUS_LAN_INIT_DONE;
1915 	E1000_WRITE_REG(hw, E1000_STATUS, data);
1916 }
1917 
1918 /**
1919  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1920  *  @hw: pointer to the HW structure
1921  **/
1922 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1923 {
1924 	s32 ret_val = E1000_SUCCESS;
1925 	u16 reg;
1926 
1927 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1928 
1929 	if (hw->phy.ops.check_reset_block(hw))
1930 		goto out;
1931 
1932 	/* Allow time for h/w to get to quiescent state after reset */
1933 	msec_delay(10);
1934 
1935 	/* Perform any necessary post-reset workarounds */
1936 	switch (hw->mac.type) {
1937 	case e1000_pchlan:
1938 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1939 		if (ret_val)
1940 			goto out;
1941 		break;
1942 	case e1000_pch2lan:
1943 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1944 		if (ret_val)
1945 			goto out;
1946 		break;
1947 	default:
1948 		break;
1949 	}
1950 
1951 	/* Clear the host wakeup bit after LCD reset */
1952 	if (hw->mac.type >= e1000_pchlan) {
1953 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
1954 		reg &= ~BM_WUC_HOST_WU_BIT;
1955 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
1956 	}
1957 
1958 	/* Configure the LCD with the extended configuration region in NVM */
1959 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
1960 	if (ret_val)
1961 		goto out;
1962 
1963 	/* Configure the LCD with the OEM bits in NVM */
1964 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1965 
1966 	if (hw->mac.type == e1000_pch2lan) {
1967 		/* Ungate automatic PHY configuration on non-managed 82579 */
1968 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
1969 		    E1000_ICH_FWSM_FW_VALID)) {
1970 			msec_delay(10);
1971 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1972 		}
1973 
1974 		/* Set EEE LPI Update Timer to 200usec */
1975 		ret_val = hw->phy.ops.acquire(hw);
1976 		if (ret_val)
1977 			goto out;
1978 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1979 						       I82579_LPI_UPDATE_TIMER);
1980 		if (ret_val)
1981 			goto release;
1982 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1983 						       0x1387);
1984 release:
1985 		hw->phy.ops.release(hw);
1986 	}
1987 
1988 out:
1989 	return ret_val;
1990 }
1991 
1992 /**
1993  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1994  *  @hw: pointer to the HW structure
1995  *
1996  *  Resets the PHY
1997  *  This is a function pointer entry point called by drivers
1998  *  or other shared routines.
1999  **/
2000 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2001 {
2002 	s32 ret_val = E1000_SUCCESS;
2003 
2004 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2005 
2006 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2007 	if ((hw->mac.type == e1000_pch2lan) &&
2008 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2009 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
2010 
2011 	ret_val = e1000_phy_hw_reset_generic(hw);
2012 	if (ret_val)
2013 		goto out;
2014 
2015 	ret_val = e1000_post_phy_reset_ich8lan(hw);
2016 
2017 out:
2018 	return ret_val;
2019 }
2020 
2021 /**
2022  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2023  *  @hw: pointer to the HW structure
2024  *  @active: TRUE to enable LPLU, FALSE to disable
2025  *
2026  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2027  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2028  *  the PHY speed.  This function will manually set the LPLU bit and restart
2029  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2030  *  since it configures the same bit.
2031  **/
2032 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2033 {
2034 	s32 ret_val = E1000_SUCCESS;
2035 	u16 oem_reg;
2036 
2037 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
2038 
2039 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2040 	if (ret_val)
2041 		goto out;
2042 
2043 	if (active)
2044 		oem_reg |= HV_OEM_BITS_LPLU;
2045 	else
2046 		oem_reg &= ~HV_OEM_BITS_LPLU;
2047 
2048 	if (!hw->phy.ops.check_reset_block(hw))
2049 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2050 
2051 	ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2052 
2053 out:
2054 	return ret_val;
2055 }
2056 
2057 /**
2058  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2059  *  @hw: pointer to the HW structure
2060  *  @active: TRUE to enable LPLU, FALSE to disable
2061  *
2062  *  Sets the LPLU D0 state according to the active flag.  When
2063  *  activating LPLU this function also disables smart speed
2064  *  and vice versa.  LPLU will not be activated unless the
2065  *  device autonegotiation advertisement meets standards of
2066  *  either 10, 10/100, or 10/100/1000 at all duplexes.
2067  *  This is a function pointer entry point only called by
2068  *  PHY setup routines.
2069  **/
2070 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2071 {
2072 	struct e1000_phy_info *phy = &hw->phy;
2073 	u32 phy_ctrl;
2074 	s32 ret_val = E1000_SUCCESS;
2075 	u16 data;
2076 
2077 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2078 
2079 	if (phy->type == e1000_phy_ife)
2080 		goto out;
2081 
2082 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2083 
2084 	if (active) {
2085 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2086 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2087 
2088 		if (phy->type != e1000_phy_igp_3)
2089 			goto out;
2090 
2091 		/*
2092 		 * Call gig speed drop workaround on LPLU before accessing
2093 		 * any PHY registers
2094 		 */
2095 		if (hw->mac.type == e1000_ich8lan)
2096 			e1000_gig_downshift_workaround_ich8lan(hw);
2097 
2098 		/* When LPLU is enabled, we should disable SmartSpeed */
2099 		ret_val = phy->ops.read_reg(hw,
2100 					    IGP01E1000_PHY_PORT_CONFIG,
2101 					    &data);
2102 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2103 		ret_val = phy->ops.write_reg(hw,
2104 					     IGP01E1000_PHY_PORT_CONFIG,
2105 					     data);
2106 		if (ret_val)
2107 			goto out;
2108 	} else {
2109 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2110 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2111 
2112 		if (phy->type != e1000_phy_igp_3)
2113 			goto out;
2114 
2115 		/*
2116 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2117 		 * during Dx states where the power conservation is most
2118 		 * important.  During driver activity we should enable
2119 		 * SmartSpeed, so performance is maintained.
2120 		 */
2121 		if (phy->smart_speed == e1000_smart_speed_on) {
2122 			ret_val = phy->ops.read_reg(hw,
2123 						    IGP01E1000_PHY_PORT_CONFIG,
2124 						    &data);
2125 			if (ret_val)
2126 				goto out;
2127 
2128 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2129 			ret_val = phy->ops.write_reg(hw,
2130 						     IGP01E1000_PHY_PORT_CONFIG,
2131 						     data);
2132 			if (ret_val)
2133 				goto out;
2134 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2135 			ret_val = phy->ops.read_reg(hw,
2136 						    IGP01E1000_PHY_PORT_CONFIG,
2137 						    &data);
2138 			if (ret_val)
2139 				goto out;
2140 
2141 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2142 			ret_val = phy->ops.write_reg(hw,
2143 						     IGP01E1000_PHY_PORT_CONFIG,
2144 						     data);
2145 			if (ret_val)
2146 				goto out;
2147 		}
2148 	}
2149 
2150 out:
2151 	return ret_val;
2152 }
2153 
2154 /**
2155  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2156  *  @hw: pointer to the HW structure
2157  *  @active: TRUE to enable LPLU, FALSE to disable
2158  *
2159  *  Sets the LPLU D3 state according to the active flag.  When
2160  *  activating LPLU this function also disables smart speed
2161  *  and vice versa.  LPLU will not be activated unless the
2162  *  device autonegotiation advertisement meets standards of
2163  *  either 10, 10/100, or 10/100/1000 at all duplexes.
2164  *  This is a function pointer entry point only called by
2165  *  PHY setup routines.
2166  **/
2167 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2168 {
2169 	struct e1000_phy_info *phy = &hw->phy;
2170 	u32 phy_ctrl;
2171 	s32 ret_val = E1000_SUCCESS;
2172 	u16 data;
2173 
2174 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2175 
2176 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2177 
2178 	if (!active) {
2179 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2180 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2181 
2182 		if (phy->type != e1000_phy_igp_3)
2183 			goto out;
2184 
2185 		/*
2186 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2187 		 * during Dx states where the power conservation is most
2188 		 * important.  During driver activity we should enable
2189 		 * SmartSpeed, so performance is maintained.
2190 		 */
2191 		if (phy->smart_speed == e1000_smart_speed_on) {
2192 			ret_val = phy->ops.read_reg(hw,
2193 						    IGP01E1000_PHY_PORT_CONFIG,
2194 						    &data);
2195 			if (ret_val)
2196 				goto out;
2197 
2198 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2199 			ret_val = phy->ops.write_reg(hw,
2200 						     IGP01E1000_PHY_PORT_CONFIG,
2201 						     data);
2202 			if (ret_val)
2203 				goto out;
2204 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2205 			ret_val = phy->ops.read_reg(hw,
2206 						    IGP01E1000_PHY_PORT_CONFIG,
2207 						    &data);
2208 			if (ret_val)
2209 				goto out;
2210 
2211 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2212 			ret_val = phy->ops.write_reg(hw,
2213 						     IGP01E1000_PHY_PORT_CONFIG,
2214 						     data);
2215 			if (ret_val)
2216 				goto out;
2217 		}
2218 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2219 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2220 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2221 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2222 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2223 
2224 		if (phy->type != e1000_phy_igp_3)
2225 			goto out;
2226 
2227 		/*
2228 		 * Call gig speed drop workaround on LPLU before accessing
2229 		 * any PHY registers
2230 		 */
2231 		if (hw->mac.type == e1000_ich8lan)
2232 			e1000_gig_downshift_workaround_ich8lan(hw);
2233 
2234 		/* When LPLU is enabled, we should disable SmartSpeed */
2235 		ret_val = phy->ops.read_reg(hw,
2236 					    IGP01E1000_PHY_PORT_CONFIG,
2237 					    &data);
2238 		if (ret_val)
2239 			goto out;
2240 
2241 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2242 		ret_val = phy->ops.write_reg(hw,
2243 					     IGP01E1000_PHY_PORT_CONFIG,
2244 					     data);
2245 	}
2246 
2247 out:
2248 	return ret_val;
2249 }
2250 
2251 /**
2252  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2253  *  @hw: pointer to the HW structure
2254  *  @bank:  pointer to the variable that returns the active bank
2255  *
2256  *  Reads signature byte from the NVM using the flash access registers.
2257  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2258  **/
2259 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2260 {
2261 	u32 eecd;
2262 	struct e1000_nvm_info *nvm = &hw->nvm;
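	/*
	 * act_offset is the byte address of the signature byte (the high
	 * byte of word 0x13) in bank 0; bank1_offset is the size of one
	 * bank in bytes, used to address the same byte in bank 1.
	 */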
2263 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2264 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2265 	u8 sig_byte = 0;
2266 	s32 ret_val = E1000_SUCCESS;
2267 
2268 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2269 
2270 	switch (hw->mac.type) {
2271 	case e1000_ich8lan:
2272 	case e1000_ich9lan:
2273 		eecd = E1000_READ_REG(hw, E1000_EECD);
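		/*
		 * Trust the SEC1VAL bit only when EECD reports that it is
		 * valid; otherwise fall through and read the bank
		 * signatures directly from the flash.
		 */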
2274 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2275 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2276 			if (eecd & E1000_EECD_SEC1VAL)
2277 				*bank = 1;
2278 			else
2279 				*bank = 0;
2280 
2281 			goto out;
2282 		}
2283 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2284 		/* fall-thru */
2285 	default:
2286 		/* set bank to 0 in case flash read fails */
2287 		*bank = 0;
2288 
2289 		/* Check bank 0 */
2290 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2291 							&sig_byte);
2292 		if (ret_val)
2293 			goto out;
2294 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2295 		    E1000_ICH_NVM_SIG_VALUE) {
2296 			*bank = 0;
2297 			goto out;
2298 		}
2299 
2300 		/* Check bank 1 */
2301 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2302 							bank1_offset,
2303 							&sig_byte);
2304 		if (ret_val)
2305 			goto out;
2306 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2307 		    E1000_ICH_NVM_SIG_VALUE) {
2308 			*bank = 1;
2309 			goto out;
2310 		}
2311 
2312 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2313 		ret_val = -E1000_ERR_NVM;
2314 		break;
2315 	}
2316 out:
2317 	return ret_val;
2318 }
2319 
2320 /**
2321  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2322  *  @hw: pointer to the HW structure
2323  *  @offset: The offset (in bytes) of the word(s) to read.
2324  *  @words: Size of data to read in words
2325  *  @data: Pointer to the word(s) to read at offset.
2326  *
2327  *  Reads a word(s) from the NVM using the flash access registers.
2328  **/
2329 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2330 				  u16 *data)
2331 {
2332 	struct e1000_nvm_info *nvm = &hw->nvm;
2333 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2334 	u32 act_offset;
2335 	s32 ret_val = E1000_SUCCESS;
2336 	u32 bank = 0;
2337 	u16 i, word;
2338 
2339 	DEBUGFUNC("e1000_read_nvm_ich8lan");
2340 
2341 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2342 	    (words == 0)) {
2343 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2344 		ret_val = -E1000_ERR_NVM;
2345 		goto out;
2346 	}
2347 
2348 	nvm->ops.acquire(hw);
2349 
2350 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2351 	if (ret_val != E1000_SUCCESS) {
2352 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2353 		bank = 0;
2354 	}
2355 
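	/*
	 * Compute the word offset of the requested data within the valid
	 * bank; bank 1 starts flash_bank_size words into the flash.
	 */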
2356 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2357 	act_offset += offset;
2358 
2359 	ret_val = E1000_SUCCESS;
2360 	for (i = 0; i < words; i++) {
2361 		if (dev_spec->shadow_ram[offset+i].modified) {
2362 			data[i] = dev_spec->shadow_ram[offset+i].value;
2363 		} else {
2364 			ret_val = e1000_read_flash_word_ich8lan(hw,
2365 								act_offset + i,
2366 								&word);
2367 			if (ret_val)
2368 				break;
2369 			data[i] = word;
2370 		}
2371 	}
2372 
2373 	nvm->ops.release(hw);
2374 
2375 out:
2376 	if (ret_val)
2377 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2378 
2379 	return ret_val;
2380 }
2381 
2382 /**
2383  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2384  *  @hw: pointer to the HW structure
2385  *
2386  *  This function does initial flash setup so that a new read/write/erase cycle
2387  *  can be started.
2388  **/
2389 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2390 {
2391 	union ich8_hws_flash_status hsfsts;
2392 	s32 ret_val = -E1000_ERR_NVM;
2393 
2394 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2395 
2396 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2397 
2398 	/* Check if the flash descriptor is valid */
2399 	if (hsfsts.hsf_status.fldesvalid == 0) {
2400 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
2401 		goto out;
2402 	}
2403 
2404 	/* Clear FCERR and DAEL in hw status by writing 1 */
2405 	hsfsts.hsf_status.flcerr = 1;
2406 	hsfsts.hsf_status.dael = 1;
2407 
2408 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2409 
2410 	/*
2411 	 * Either we should have a hardware SPI cycle-in-progress bit
2412 	 * that can be checked before starting a new cycle, or the FDONE
2413 	 * bit should be changed in the hardware so that it is 1 after a
2414 	 * hardware reset, in which case it can be used as an indication
2415 	 * of whether a cycle is currently in progress or has already
2416 	 * completed.
2417 	 */
2418 
2419 	if (hsfsts.hsf_status.flcinprog == 0) {
2420 		/*
2421 		 * There is no cycle running at present,
2422 		 * so we can start a cycle.
2423 		 * Begin by setting Flash Cycle Done.
2424 		 */
2425 		hsfsts.hsf_status.flcdone = 1;
2426 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2427 		ret_val = E1000_SUCCESS;
2428 	} else {
2429 		s32 i;
2430 
2431 		/*
2432 		 * Otherwise poll for some time so the current
2433 		 * cycle has a chance to end before giving up.
2434 		 */
2435 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2436 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2437 							      ICH_FLASH_HSFSTS);
2438 			if (hsfsts.hsf_status.flcinprog == 0) {
2439 				ret_val = E1000_SUCCESS;
2440 				break;
2441 			}
2442 			usec_delay(1);
2443 		}
2444 		if (ret_val == E1000_SUCCESS) {
2445 			 * The previous cycle finished within the timeout,
2446 			 * so now set the Flash Cycle Done.
2447 			 * now set the Flash Cycle Done.
2448 			 */
2449 			hsfsts.hsf_status.flcdone = 1;
2450 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2451 						hsfsts.regval);
2452 		} else {
2453 			DEBUGOUT("Flash controller busy, cannot get access\n");
2454 		}
2455 	}
2456 
2457 out:
2458 	return ret_val;
2459 }
2460 
2461 /**
2462  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2463  *  @hw: pointer to the HW structure
2464  *  @timeout: maximum time to wait for completion
2465  *
2466  *  This function starts a flash cycle and waits for its completion.
2467  **/
2468 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2469 {
2470 	union ich8_hws_flash_ctrl hsflctl;
2471 	union ich8_hws_flash_status hsfsts;
2472 	s32 ret_val = -E1000_ERR_NVM;
2473 	u32 i = 0;
2474 
2475 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
2476 
2477 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2478 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2479 	hsflctl.hsf_ctrl.flcgo = 1;
2480 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2481 
2482 	/* wait till FDONE bit is set to 1 */
2483 	do {
2484 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2485 		if (hsfsts.hsf_status.flcdone == 1)
2486 			break;
2487 		usec_delay(1);
2488 	} while (i++ < timeout);
2489 
2490 	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2491 		ret_val = E1000_SUCCESS;
2492 
2493 	return ret_val;
2494 }
2495 
2496 /**
2497  *  e1000_read_flash_word_ich8lan - Read word from flash
2498  *  @hw: pointer to the HW structure
2499  *  @offset: offset to data location
2500  *  @data: pointer to the location for storing the data
2501  *
2502  *  Reads the flash word at offset into data.  Offset is converted
2503  *  to bytes before read.
2504  **/
2505 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2506 					 u16 *data)
2507 {
2508 	s32 ret_val;
2509 
2510 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
2511 
2512 	if (!data) {
2513 		ret_val = -E1000_ERR_NVM;
2514 		goto out;
2515 	}
2516 
2517 	/* Must convert offset into bytes. */
2518 	offset <<= 1;
2519 
2520 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2521 
2522 out:
2523 	return ret_val;
2524 }
2525 
2526 /**
2527  *  e1000_read_flash_byte_ich8lan - Read byte from flash
2528  *  @hw: pointer to the HW structure
2529  *  @offset: The offset of the byte to read.
2530  *  @data: Pointer to a byte to store the value read.
2531  *
2532  *  Reads a single byte from the NVM using the flash access registers.
2533  **/
2534 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2535 					 u8 *data)
2536 {
2537 	s32 ret_val = E1000_SUCCESS;
2538 	u16 word = 0;
2539 
2540 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2541 	if (ret_val)
2542 		goto out;
2543 
2544 	*data = (u8)word;
2545 
2546 out:
2547 	return ret_val;
2548 }
2549 
2550 /**
2551  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
2552  *  @hw: pointer to the HW structure
2553  *  @offset: The offset (in bytes) of the byte or word to read.
2554  *  @size: Size of data to read, 1=byte 2=word
2555  *  @data: Pointer to the word to store the value read.
2556  *
2557  *  Reads a byte or word from the NVM using the flash access registers.
2558  **/
2559 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2560 					 u8 size, u16 *data)
2561 {
2562 	union ich8_hws_flash_status hsfsts;
2563 	union ich8_hws_flash_ctrl hsflctl;
2564 	u32 flash_linear_addr;
2565 	u32 flash_data = 0;
2566 	s32 ret_val = -E1000_ERR_NVM;
2567 	u8 count = 0;
2568 
2569 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
2570 
2571 	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2572 		goto out;
2573 
2574 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2575 			    hw->nvm.flash_base_addr;
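	/* flash_linear_addr is the byte address of the target data in flash */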
2576 
2577 	do {
2578 		usec_delay(1);
2579 		/* Steps */
2580 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2581 		if (ret_val != E1000_SUCCESS)
2582 			break;
2583 
2584 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2585 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2586 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2587 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2588 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2589 
2590 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2591 
2592 		ret_val = e1000_flash_cycle_ich8lan(hw,
2593 						ICH_FLASH_READ_COMMAND_TIMEOUT);
2594 
2595 		/*
2596 		 * Check if FCERR is set to 1; if so, clear it and retry
2597 		 * the whole sequence a few more times.  Otherwise read in
2598 		 * (shift in) the Flash Data0; the data is returned least
2599 		 * significant byte first.
2600 		 */
2601 		if (ret_val == E1000_SUCCESS) {
2602 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2603 			if (size == 1)
2604 				*data = (u8)(flash_data & 0x000000FF);
2605 			else if (size == 2)
2606 				*data = (u16)(flash_data & 0x0000FFFF);
2607 			break;
2608 		} else {
2609 			/*
2610 			 * If we've gotten here, then things are probably
2611 			 * completely hosed, but if the error condition is
2612 			 * detected, it won't hurt to give it another try...
2613 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2614 			 */
2615 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2616 							      ICH_FLASH_HSFSTS);
2617 			if (hsfsts.hsf_status.flcerr == 1) {
2618 				/* Repeat for some time before giving up. */
2619 				continue;
2620 			} else if (hsfsts.hsf_status.flcdone == 0) {
2621 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2622 				break;
2623 			}
2624 		}
2625 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2626 
2627 out:
2628 	return ret_val;
2629 }
2630 
2631 /**
2632  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
2633  *  @hw: pointer to the HW structure
2634  *  @offset: The offset (in bytes) of the word(s) to write.
2635  *  @words: Size of data to write in words
2636  *  @data: Pointer to the word(s) to write at offset.
2637  *
2638  *  Writes a byte or word to the NVM using the flash access registers.
2639  **/
2640 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2641 				   u16 *data)
2642 {
2643 	struct e1000_nvm_info *nvm = &hw->nvm;
2644 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2645 	s32 ret_val = E1000_SUCCESS;
2646 	u16 i;
2647 
2648 	DEBUGFUNC("e1000_write_nvm_ich8lan");
2649 
2650 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2651 	    (words == 0)) {
2652 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2653 		ret_val = -E1000_ERR_NVM;
2654 		goto out;
2655 	}
2656 
2657 	nvm->ops.acquire(hw);
2658 
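	/*
	 * Writes only update the shadow RAM; they are committed to the
	 * flash by e1000_update_nvm_checksum_ich8lan().
	 */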
2659 	for (i = 0; i < words; i++) {
2660 		dev_spec->shadow_ram[offset+i].modified = TRUE;
2661 		dev_spec->shadow_ram[offset+i].value = data[i];
2662 	}
2663 
2664 	nvm->ops.release(hw);
2665 
2666 out:
2667 	return ret_val;
2668 }
2669 
2670 /**
2671  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2672  *  @hw: pointer to the HW structure
2673  *
2674  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
2675  *  which writes the checksum to the shadow ram.  The changes in the shadow
2676  *  ram are then committed to the EEPROM by processing each bank at a time
2677  *  checking for the modified bit and writing only the pending changes.
2678  *  After a successful commit, the shadow ram is cleared and is ready for
2679  *  future writes.
2680  **/
2681 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2682 {
2683 	struct e1000_nvm_info *nvm = &hw->nvm;
2684 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2685 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2686 	s32 ret_val;
2687 	u16 data;
2688 
2689 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2690 
2691 	ret_val = e1000_update_nvm_checksum_generic(hw);
2692 	if (ret_val)
2693 		goto out;
2694 
2695 	if (nvm->type != e1000_nvm_flash_sw)
2696 		goto out;
2697 
2698 	nvm->ops.acquire(hw);
2699 
2700 	/*
2701 	 * We're writing to the opposite bank so if we're on bank 1,
2702 	 * write to bank 0 etc.  We also need to erase the segment that
2703 	 * is going to be written
2704 	 */
2705 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2706 	if (ret_val != E1000_SUCCESS) {
2707 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2708 		bank = 0;
2709 	}
2710 
2711 	if (bank == 0) {
2712 		new_bank_offset = nvm->flash_bank_size;
2713 		old_bank_offset = 0;
2714 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2715 		if (ret_val)
2716 			goto release;
2717 	} else {
2718 		old_bank_offset = nvm->flash_bank_size;
2719 		new_bank_offset = 0;
2720 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2721 		if (ret_val)
2722 			goto release;
2723 	}
2724 
2725 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2726 		/*
2727 		 * Determine whether to write the value stored
2728 		 * in the other NVM bank or a modified value stored
2729 		 * in the shadow RAM
2730 		 */
2731 		if (dev_spec->shadow_ram[i].modified) {
2732 			data = dev_spec->shadow_ram[i].value;
2733 		} else {
2734 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
2735 								old_bank_offset,
2736 								&data);
2737 			if (ret_val)
2738 				break;
2739 		}
2740 
2741 		/*
2742 		 * If the word is 0x13, then make sure the signature bits
2743 		 * (15:14) are 11b until the commit has completed.
2744 		 * This will allow us to write 10b which indicates the
2745 		 * signature is valid.  We want to do this after the write
2746 		 * has completed so that we don't mark the segment valid
2747 		 * while the write is still in progress
2748 		 */
2749 		if (i == E1000_ICH_NVM_SIG_WORD)
2750 			data |= E1000_ICH_NVM_SIG_MASK;
2751 
2752 		/* Convert offset to bytes. */
2753 		act_offset = (i + new_bank_offset) << 1;
2754 
2755 		usec_delay(100);
2756 		/* Write the bytes to the new bank. */
2757 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2758 							       act_offset,
2759 							       (u8)data);
2760 		if (ret_val)
2761 			break;
2762 
2763 		usec_delay(100);
2764 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2765 							  act_offset + 1,
2766 							  (u8)(data >> 8));
2767 		if (ret_val)
2768 			break;
2769 	}
2770 
2771 	/*
2772 	 * Don't bother writing the segment valid bits if sector
2773 	 * programming failed.
2774 	 */
2775 	if (ret_val) {
2776 		DEBUGOUT("Flash commit failed.\n");
2777 		goto release;
2778 	}
2779 
2780 	/*
2781 	 * Finally, validate the new segment by setting bits 15:14
2782 	 * to 10b in word 0x13.  This can be done without an erase
2783 	 * because these bits start out as 11b and we only need to
2784 	 * clear bit 14.
2785 	 */
2786 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2787 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2788 	if (ret_val)
2789 		goto release;
2790 
2791 	data &= 0xBFFF;
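	/*
	 * act_offset is a word offset here; act_offset * 2 + 1 is the byte
	 * address of the signature word's high byte.
	 */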
2792 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2793 						       act_offset * 2 + 1,
2794 						       (u8)(data >> 8));
2795 	if (ret_val)
2796 		goto release;
2797 
2798 	/*
2799 	 * And invalidate the previously valid segment by clearing the
2800 	 * high byte of its signature word (0x13).  This can be done
2801 	 * without an erase because a flash erase sets all bits to 1's,
2802 	 * so we can change 1's to 0's without an erase.
2803 	 */
2804 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2805 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2806 	if (ret_val)
2807 		goto release;
2808 
2809 	/* Great!  Everything worked, we can now clear the cached entries. */
2810 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2811 		dev_spec->shadow_ram[i].modified = FALSE;
2812 		dev_spec->shadow_ram[i].value = 0xFFFF;
2813 	}
2814 
2815 release:
2816 	nvm->ops.release(hw);
2817 
2818 	/*
2819 	 * Reload the EEPROM, or else modifications will not appear
2820 	 * until after the next adapter reset.
2821 	 */
2822 	if (!ret_val) {
2823 		nvm->ops.reload(hw);
2824 		msec_delay(10);
2825 	}
2826 
2827 out:
2828 	if (ret_val)
2829 		DEBUGOUT1("NVM update error: %d\n", ret_val);
2830 
2831 	return ret_val;
2832 }
2833 
2834 /**
2835  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2836  *  @hw: pointer to the HW structure
2837  *
2838  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2839  *  If the bit is 0, the EEPROM had been modified but the checksum was not
2840  *  calculated; in that case we need to calculate the checksum and set bit 6.
2841  **/
2842 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2843 {
2844 	s32 ret_val = E1000_SUCCESS;
2845 	u16 data;
2846 
2847 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2848 
2849 	/*
2850 	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
2851 	 * needs to be fixed.  This bit is an indication that the NVM
2852 	 * was prepared by OEM software and did not calculate the
2853 	 * checksum...a likely scenario.
2854 	 */
2855 	ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2856 	if (ret_val)
2857 		goto out;
2858 
2859 	if ((data & 0x40) == 0) {
2860 		data |= 0x40;
2861 		ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2862 		if (ret_val)
2863 			goto out;
2864 		ret_val = hw->nvm.ops.update(hw);
2865 		if (ret_val)
2866 			goto out;
2867 	}
2868 
2869 	ret_val = e1000_validate_nvm_checksum_generic(hw);
2870 
2871 out:
2872 	return ret_val;
2873 }
2874 
2875 /**
2876  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2877  *  @hw: pointer to the HW structure
2878  *  @offset: The offset (in bytes) of the byte/word to write.
2879  *  @size: Size of data to write, 1=byte 2=word
2880  *  @data: The byte(s) to write to the NVM.
2881  *
2882  *  Writes one/two bytes to the NVM using the flash access registers.
2883  **/
2884 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2885 					  u8 size, u16 data)
2886 {
2887 	union ich8_hws_flash_status hsfsts;
2888 	union ich8_hws_flash_ctrl hsflctl;
2889 	u32 flash_linear_addr;
2890 	u32 flash_data = 0;
2891 	s32 ret_val = -E1000_ERR_NVM;
2892 	u8 count = 0;
2893 
2894 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
2895 
2896 	if (size < 1 || size > 2 || data > size * 0xff ||
2897 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
2898 		goto out;
2899 
2900 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2901 			    hw->nvm.flash_base_addr;
2902 
2903 	do {
2904 		usec_delay(1);
2905 		/* Steps */
2906 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2907 		if (ret_val != E1000_SUCCESS)
2908 			break;
2909 
2910 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2911 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2912 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2913 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2914 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2915 
2916 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2917 
2918 		if (size == 1)
2919 			flash_data = (u32)data & 0x00FF;
2920 		else
2921 			flash_data = (u32)data;
2922 
2923 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2924 
2925 		/*
2926 		 * Check if FCERR is set to 1; if so, clear it and retry
2927 		 * the whole sequence a few more times, else we are done.
2928 		 */
2929 		ret_val = e1000_flash_cycle_ich8lan(hw,
2930 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2931 		if (ret_val == E1000_SUCCESS)
2932 			break;
2933 
2934 		/*
2935 		 * If we're here, then things are most likely
2936 		 * completely hosed, but if the error condition
2937 		 * is detected, it won't hurt to give it another
2938 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2939 		 */
2940 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2941 		if (hsfsts.hsf_status.flcerr == 1)
2942 			/* Repeat for some time before giving up. */
2943 			continue;
2944 		if (hsfsts.hsf_status.flcdone == 0) {
2945 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2946 			break;
2947 		}
2948 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2949 
2950 out:
2951 	return ret_val;
2952 }
2953 
2954 /**
2955  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2956  *  @hw: pointer to the HW structure
2957  *  @offset: The index of the byte to write.
2958  *  @data: The byte to write to the NVM.
2959  *
2960  *  Writes a single byte to the NVM using the flash access registers.
2961  **/
2962 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2963 					  u8 data)
2964 {
2965 	u16 word = (u16)data;
2966 
2967 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2968 
2969 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2970 }
2971 
2972 /**
2973  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2974  *  @hw: pointer to the HW structure
2975  *  @offset: The offset of the byte to write.
2976  *  @byte: The byte to write to the NVM.
2977  *
2978  *  Writes a single byte to the NVM using the flash access registers.
2979  *  Goes through a retry algorithm before giving up.
2980  **/
2981 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2982 						u32 offset, u8 byte)
2983 {
2984 	s32 ret_val;
2985 	u16 program_retries;
2986 
2987 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2988 
2989 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2990 	if (ret_val == E1000_SUCCESS)
2991 		goto out;
2992 
2993 	for (program_retries = 0; program_retries < 100; program_retries++) {
2994 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2995 		usec_delay(100);
2996 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2997 		if (ret_val == E1000_SUCCESS)
2998 			break;
2999 	}
3000 	if (program_retries == 100) {
3001 		ret_val = -E1000_ERR_NVM;
3002 		goto out;
3003 	}
3004 
3005 out:
3006 	return ret_val;
3007 }
3008 
3009 /**
3010  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3011  *  @hw: pointer to the HW structure
3012  *  @bank: 0 for first bank, 1 for second bank, etc.
3013  *
3014  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3015  *  bank N is 4096 * N + flash_reg_addr.
3016  **/
3017 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3018 {
3019 	struct e1000_nvm_info *nvm = &hw->nvm;
3020 	union ich8_hws_flash_status hsfsts;
3021 	union ich8_hws_flash_ctrl hsflctl;
3022 	u32 flash_linear_addr;
3023 	/* bank size is in 16bit words - adjust to bytes */
3024 	u32 flash_bank_size = nvm->flash_bank_size * 2;
3025 	s32 ret_val = E1000_SUCCESS;
3026 	s32 count = 0;
3027 	s32 j, iteration, sector_size;
3028 
3029 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3030 
3031 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3032 
3033 	/*
3034 	 * Determine HW Sector size: Read BERASE bits of hw flash status
3035 	 * register
3036 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3037 	 *     consecutive sectors.  The start index for the nth Hw sector
3038 	 *     can be calculated as = bank * 4096 + n * 256
3039 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3040 	 *     The start index for the nth Hw sector can be calculated
3041 	 *     as = bank * 4096
3042 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3043 	 *     (ich9 only, otherwise error condition)
3044 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3045 	 */
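	/*
	 * For example, with 256-byte sectors and a 4 KB bank,
	 * iteration = 4096 / 256 = 16 erase cycles per bank.
	 */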
3046 	switch (hsfsts.hsf_status.berasesz) {
3047 	case 0:
3048 		/* Hw sector size 256 */
3049 		sector_size = ICH_FLASH_SEG_SIZE_256;
3050 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3051 		break;
3052 	case 1:
3053 		sector_size = ICH_FLASH_SEG_SIZE_4K;
3054 		iteration = 1;
3055 		break;
3056 	case 2:
3057 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3058 		iteration = 1;
3059 		break;
3060 	case 3:
3061 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3062 		iteration = 1;
3063 		break;
3064 	default:
3065 		ret_val = -E1000_ERR_NVM;
3066 		goto out;
3067 	}
3068 
3069 	/* Start with the base address, then add the sector offset. */
3070 	flash_linear_addr = hw->nvm.flash_base_addr;
3071 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3072 
3073 	for (j = 0; j < iteration ; j++) {
3074 		do {
3075 			/* Steps */
3076 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3077 			if (ret_val)
3078 				goto out;
3079 
3080 			/*
3081 			 * Write a value 11 (block Erase) in Flash
3082 			 * Cycle field in hw flash control
3083 			 */
3084 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3085 							      ICH_FLASH_HSFCTL);
3086 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3087 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3088 						hsflctl.regval);
3089 
3090 			/*
3091 			 * Write the last 24 bits of an index within the
3092 			 * block into Flash Linear address field in Flash
3093 			 * Address.
3094 			 */
3095 			flash_linear_addr += (j * sector_size);
3096 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3097 					      flash_linear_addr);
3098 
3099 			ret_val = e1000_flash_cycle_ich8lan(hw,
3100 					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3101 			if (ret_val == E1000_SUCCESS)
3102 				break;
3103 
3104 			/*
3105 			 * Check if FCERR is set to 1.  If so,
3106 			 * clear it and retry the whole sequence
3107 			 * a few more times, else we are done.
3108 			 */
3109 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3110 						      ICH_FLASH_HSFSTS);
3111 			if (hsfsts.hsf_status.flcerr == 1)
3112 				/* repeat for some time before giving up */
3113 				continue;
3114 			else if (hsfsts.hsf_status.flcdone == 0)
3115 				goto out;
3116 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3117 	}
3118 
3119 out:
3120 	return ret_val;
3121 }
3122 
3123 /**
3124  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3125  *  @hw: pointer to the HW structure
3126  *  @data: Pointer to the LED settings
3127  *
3128  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3129  *  setting is all 0's or all F's, set the LED default to a valid LED
3130  *  default setting.
3131  **/
3132 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3133 {
3134 	s32 ret_val;
3135 
3136 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3137 
3138 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3139 	if (ret_val) {
3140 		DEBUGOUT("NVM Read Error\n");
3141 		goto out;
3142 	}
3143 
3144 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3145 		*data = ID_LED_DEFAULT_ICH8LAN;
3146 
3147 out:
3148 	return ret_val;
3149 }
3150 
3151 /**
3152  *  e1000_id_led_init_pchlan - store LED configurations
3153  *  @hw: pointer to the HW structure
3154  *
3155  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3156  *  the PHY LED configuration register.
3157  *
3158  *  PCH also does not have an "always on" or "always off" mode which
3159  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3160  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3161  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3162  *  link based on logic in e1000_led_[on|off]_pchlan().
3163  **/
3164 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3165 {
3166 	struct e1000_mac_info *mac = &hw->mac;
3167 	s32 ret_val;
3168 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3169 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3170 	u16 data, i, temp, shift;
3171 
3172 	DEBUGFUNC("e1000_id_led_init_pchlan");
3173 
3174 	/* Get default ID LED modes */
3175 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3176 	if (ret_val)
3177 		goto out;
3178 
3179 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3180 	mac->ledctl_mode1 = mac->ledctl_default;
3181 	mac->ledctl_mode2 = mac->ledctl_default;
3182 
3183 	for (i = 0; i < 4; i++) {
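		/*
		 * Each LED's default mode occupies 4 bits in the NVM word,
		 * while the PHY LED configuration fields are packed at
		 * 5-bit intervals.
		 */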
3184 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3185 		shift = (i * 5);
3186 		switch (temp) {
3187 		case ID_LED_ON1_DEF2:
3188 		case ID_LED_ON1_ON2:
3189 		case ID_LED_ON1_OFF2:
3190 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3191 			mac->ledctl_mode1 |= (ledctl_on << shift);
3192 			break;
3193 		case ID_LED_OFF1_DEF2:
3194 		case ID_LED_OFF1_ON2:
3195 		case ID_LED_OFF1_OFF2:
3196 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3197 			mac->ledctl_mode1 |= (ledctl_off << shift);
3198 			break;
3199 		default:
3200 			/* Do nothing */
3201 			break;
3202 		}
3203 		switch (temp) {
3204 		case ID_LED_DEF1_ON2:
3205 		case ID_LED_ON1_ON2:
3206 		case ID_LED_OFF1_ON2:
3207 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3208 			mac->ledctl_mode2 |= (ledctl_on << shift);
3209 			break;
3210 		case ID_LED_DEF1_OFF2:
3211 		case ID_LED_ON1_OFF2:
3212 		case ID_LED_OFF1_OFF2:
3213 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3214 			mac->ledctl_mode2 |= (ledctl_off << shift);
3215 			break;
3216 		default:
3217 			/* Do nothing */
3218 			break;
3219 		}
3220 	}
3221 
3222 out:
3223 	return ret_val;
3224 }
3225 
3226 /**
3227  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3228  *  @hw: pointer to the HW structure
3229  *
3230  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3231  *  register, so the bus width is hard coded.
3232  **/
3233 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3234 {
3235 	struct e1000_bus_info *bus = &hw->bus;
3236 	s32 ret_val;
3237 
3238 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3239 
3240 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3241 
3242 	/*
3243 	 * ICH devices are "PCI Express"-ish.  They have
3244 	 * a configuration space, but do not contain
3245 	 * PCI Express Capability registers, so bus width
3246 	 * must be hardcoded.
3247 	 */
3248 	if (bus->width == e1000_bus_width_unknown)
3249 		bus->width = e1000_bus_width_pcie_x1;
3250 
3251 	return ret_val;
3252 }
3253 
3254 /**
3255  *  e1000_reset_hw_ich8lan - Reset the hardware
3256  *  @hw: pointer to the HW structure
3257  *
3258  *  Does a full reset of the hardware which includes a reset of the PHY and
3259  *  MAC.
3260  **/
3261 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3262 {
3263 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3264 	u16 reg;
3265 	u32 ctrl, kab;
3266 	s32 ret_val;
3267 
3268 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3269 
3270 	/*
3271 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
3272 	 * on the last TLP read/write transaction when MAC is reset.
3273 	 */
3274 	ret_val = e1000_disable_pcie_master_generic(hw);
3275 	if (ret_val)
3276 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3277 
3278 	DEBUGOUT("Masking off all interrupts\n");
3279 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3280 
3281 	/*
3282 	 * Disable the Transmit and Receive units.  Then delay to allow
3283 	 * any pending transactions to complete before we hit the MAC
3284 	 * with the global reset.
3285 	 */
3286 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3287 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3288 	E1000_WRITE_FLUSH(hw);
3289 
3290 	msec_delay(10);
3291 
3292 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3293 	if (hw->mac.type == e1000_ich8lan) {
3294 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3295 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3296 		/* Set Packet Buffer Size to 16k. */
3297 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3298 	}
3299 
3300 	if (hw->mac.type == e1000_pchlan) {
3301 		/* Save the NVM K1 bit setting */
3302 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3303 		if (ret_val)
3304 			return ret_val;
3305 
3306 		if (reg & E1000_NVM_K1_ENABLE)
3307 			dev_spec->nvm_k1_enabled = TRUE;
3308 		else
3309 			dev_spec->nvm_k1_enabled = FALSE;
3310 	}
3311 
3312 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3313 
3314 	if (!hw->phy.ops.check_reset_block(hw)) {
3315 		/*
3316 		 * Full-chip reset requires MAC and PHY reset at the same
3317 		 * time to make sure the interface between MAC and the
3318 		 * external PHY is reset.
3319 		 */
3320 		ctrl |= E1000_CTRL_PHY_RST;
3321 
3322 		/*
3323 		 * Gate automatic PHY configuration by hardware on
3324 		 * non-managed 82579
3325 		 */
3326 		if ((hw->mac.type == e1000_pch2lan) &&
3327 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3328 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3329 	}
3330 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3331 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3332 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3333 	/* cannot issue a flush here because it hangs the hardware */
3334 	msec_delay(20);
3335 
3336 	if (!ret_val)
3337 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
3338 
3339 	if (ctrl & E1000_CTRL_PHY_RST) {
3340 		ret_val = hw->phy.ops.get_cfg_done(hw);
3341 		if (ret_val)
3342 			goto out;
3343 
3344 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3345 		if (ret_val)
3346 			goto out;
3347 	}
3348 
3349 	/*
3350 	 * For PCH, this write will make sure that any noise
3351 	 * will be detected as a CRC error and be dropped rather than show up
3352 	 * as a bad packet to the DMA engine.
3353 	 */
3354 	if (hw->mac.type == e1000_pchlan)
3355 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3356 
3357 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3358 	E1000_READ_REG(hw, E1000_ICR);
3359 
3360 	kab = E1000_READ_REG(hw, E1000_KABGTXD);
3361 	kab |= E1000_KABGTXD_BGSQLBIAS;
3362 	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3363 
3364 out:
3365 	return ret_val;
3366 }
3367 
3368 /**
3369  *  e1000_init_hw_ich8lan - Initialize the hardware
3370  *  @hw: pointer to the HW structure
3371  *
3372  *  Prepares the hardware for transmit and receive by doing the following:
3373  *   - initialize hardware bits
3374  *   - initialize LED identification
3375  *   - setup receive address registers
3376  *   - setup flow control
3377  *   - setup transmit descriptors
3378  *   - clear statistics
3379  **/
3380 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3381 {
3382 	struct e1000_mac_info *mac = &hw->mac;
3383 	u32 ctrl_ext, txdctl, snoop;
3384 	s32 ret_val;
3385 	u16 i;
3386 
3387 	DEBUGFUNC("e1000_init_hw_ich8lan");
3388 
3389 	e1000_initialize_hw_bits_ich8lan(hw);
3390 
3391 	/* Initialize identification LED */
3392 	ret_val = mac->ops.id_led_init(hw);
3393 	if (ret_val)
3394 		DEBUGOUT("Error initializing identification LED\n");
3395 		/* This is not fatal and we should not stop init due to this */
3396 
3397 	/* Setup the receive address. */
3398 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3399 
3400 	/* Zero out the Multicast HASH table */
3401 	DEBUGOUT("Zeroing the MTA\n");
3402 	for (i = 0; i < mac->mta_reg_count; i++)
3403 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3404 
3405 	/*
3406 	 * The 82578 Rx buffer will stall if wakeup is enabled in both the
3407 	 * host and the ME.  Disable wakeup by clearing the host wakeup bit.
3408 	 * Reset the PHY after disabling host wakeup to reset the Rx buffer.
3409 	 */
3410 	if (hw->phy.type == e1000_phy_82578) {
3411 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3412 		i &= ~BM_WUC_HOST_WU_BIT;
3413 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3414 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3415 		if (ret_val)
3416 			return ret_val;
3417 	}
3418 
3419 	/* Setup link and flow control */
3420 	ret_val = mac->ops.setup_link(hw);
3421 
3422 	/* Set the transmit descriptor write-back policy for both queues */
3423 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3424 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3425 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3426 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3427 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3428 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3429 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3430 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3431 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3432 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3433 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3434 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3435 
3436 	/*
3437 	 * ICH8 has opposite polarity of no_snoop bits.
3438 	 * By default, we should use snoop behavior.
3439 	 */
3440 	if (mac->type == e1000_ich8lan)
3441 		snoop = PCIE_ICH8_SNOOP_ALL;
3442 	else
3443 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3444 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3445 
3446 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3447 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3448 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3449 
3450 	/*
3451 	 * Clear all of the statistics registers (clear on read).  It is
3452 	 * important that we do this after we have tried to establish link
3453 	 * because the symbol error count will increment wildly if there
3454 	 * is no link.
3455 	 */
3456 	e1000_clear_hw_cntrs_ich8lan(hw);
3457 
3458 	return ret_val;
3459 }
3460 /**
3461  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3462  *  @hw: pointer to the HW structure
3463  *
3464  *  Sets or clears the hardware bits necessary for correctly setting up the
3465  *  hardware for transmit and receive.
3466  **/
3467 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3468 {
3469 	u32 reg;
3470 
3471 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3472 
3473 	/* Extended Device Control */
3474 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3475 	reg |= (1 << 22);
3476 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3477 	if (hw->mac.type >= e1000_pchlan)
3478 		reg |= E1000_CTRL_EXT_PHYPDEN;
3479 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3480 
3481 	/* Transmit Descriptor Control 0 */
3482 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3483 	reg |= (1 << 22);
3484 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3485 
3486 	/* Transmit Descriptor Control 1 */
3487 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3488 	reg |= (1 << 22);
3489 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3490 
3491 	/* Transmit Arbitration Control 0 */
3492 	reg = E1000_READ_REG(hw, E1000_TARC(0));
3493 	if (hw->mac.type == e1000_ich8lan)
3494 		reg |= (1 << 28) | (1 << 29);
3495 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3496 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3497 
3498 	/* Transmit Arbitration Control 1 */
3499 	reg = E1000_READ_REG(hw, E1000_TARC(1));
3500 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3501 		reg &= ~(1 << 28);
3502 	else
3503 		reg |= (1 << 28);
3504 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
3505 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3506 
3507 	/* Device Status */
3508 	if (hw->mac.type == e1000_ich8lan) {
3509 		reg = E1000_READ_REG(hw, E1000_STATUS);
3510 		reg &= ~(1 << 31);
3511 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
3512 	}
3513 
3514 	/*
3515 	 * Work around a descriptor data corruption issue seen with NFSv2 UDP
3516 	 * traffic by disabling the NFS filtering capability.
3517 	 */
3518 	reg = E1000_READ_REG(hw, E1000_RFCTL);
3519 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3520 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3521 
3522 	return;
3523 }
3524 
3525 /**
3526  *  e1000_setup_link_ich8lan - Setup flow control and link settings
3527  *  @hw: pointer to the HW structure
3528  *
3529  *  Determines which flow control settings to use, then configures flow
3530  *  control.  Calls the appropriate media-specific link configuration
3531  *  function.  Assuming the adapter has a valid link partner, a valid link
3532  *  should be established.  Assumes the hardware has previously been reset
3533  *  and the transmitter and receiver are not enabled.
3534  **/
3535 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3536 {
3537 	s32 ret_val = E1000_SUCCESS;
3538 
3539 	DEBUGFUNC("e1000_setup_link_ich8lan");
3540 
3541 	if (hw->phy.ops.check_reset_block(hw))
3542 		goto out;
3543 
3544 	/*
3545 	 * ICH parts do not have a word in the NVM to determine
3546 	 * the default flow control setting, so we explicitly
3547 	 * set it to full.
3548 	 */
3549 	if (hw->fc.requested_mode == e1000_fc_default)
3550 		hw->fc.requested_mode = e1000_fc_full;
3551 
3552 	/*
3553 	 * Save off the requested flow control mode for use later.  Depending
3554 	 * on the link partner's capabilities, we may or may not use this mode.
3555 	 */
3556 	hw->fc.current_mode = hw->fc.requested_mode;
3557 
3558 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3559 		hw->fc.current_mode);
3560 
3561 	/* Continue to configure the copper link. */
3562 	ret_val = hw->mac.ops.setup_physical_interface(hw);
3563 	if (ret_val)
3564 		goto out;
3565 
3566 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3567 	if ((hw->phy.type == e1000_phy_82578) ||
3568 	    (hw->phy.type == e1000_phy_82579) ||
3569 	    (hw->phy.type == e1000_phy_82577)) {
3570 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3571 
3572 		ret_val = hw->phy.ops.write_reg(hw,
3573 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
3574 					     hw->fc.pause_time);
3575 		if (ret_val)
3576 			goto out;
3577 	}
3578 
3579 	ret_val = e1000_set_fc_watermarks_generic(hw);
3580 
3581 out:
3582 	return ret_val;
3583 }
3584 
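/*
 * Illustrative sketch only (not part of the upstream shared code): the
 * e1000_fc_default fallback above can be overridden by an OS-specific
 * attach path that fills in hw->fc before the first setup_link call.
 * The helper name below is hypothetical; the fc fields and the
 * e1000_fc_rx_pause mode come from the shared e1000 definitions.
 */
#if 0
static void
example_request_rx_pause(struct e1000_hw *hw)
{
	/* Ask for receive-only pause; honored on the next setup_link. */
	hw->fc.requested_mode = e1000_fc_rx_pause;
	/* Value programmed into FCTTV by e1000_setup_link_ich8lan(). */
	hw->fc.pause_time = 0xFFFF;
}
#endif
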
3585 /**
3586  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3587  *  @hw: pointer to the HW structure
3588  *
3589  *  Configures the Kumeran interface to the PHY to wait the appropriate time
3590  *  when polling the PHY, then calls the generic setup_copper_link to finish
3591  *  configuring the copper link.
3592  **/
3593 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3594 {
3595 	u32 ctrl;
3596 	s32 ret_val;
3597 	u16 reg_data;
3598 
3599 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3600 
3601 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3602 	ctrl |= E1000_CTRL_SLU;
3603 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3604 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3605 
3606 	/*
3607 	 * Set the MAC to wait the maximum time between each iteration
3608 	 * and increase the max iterations when polling the PHY;
3609 	 * this fixes erroneous timeouts at 10Mbps.
3610 	 */
3611 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3612 					       0xFFFF);
3613 	if (ret_val)
3614 		goto out;
3615 	ret_val = e1000_read_kmrn_reg_generic(hw,
3616 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
3617 					      &reg_data);
3618 	if (ret_val)
3619 		goto out;
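	/*
	 * Raising the low six bits of the in-band parameter is assumed to
	 * select the maximum polling iteration count mentioned in the
	 * comment above; the exact Kumeran field layout is not spelled out
	 * in this file.
	 */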
3620 	reg_data |= 0x3F;
3621 	ret_val = e1000_write_kmrn_reg_generic(hw,
3622 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
3623 					       reg_data);
3624 	if (ret_val)
3625 		goto out;
3626 
3627 	switch (hw->phy.type) {
3628 	case e1000_phy_igp_3:
3629 		ret_val = e1000_copper_link_setup_igp(hw);
3630 		if (ret_val)
3631 			goto out;
3632 		break;
3633 	case e1000_phy_bm:
3634 	case e1000_phy_82578:
3635 		ret_val = e1000_copper_link_setup_m88(hw);
3636 		if (ret_val)
3637 			goto out;
3638 		break;
3639 	case e1000_phy_82577:
3640 	case e1000_phy_82579:
3641 		ret_val = e1000_copper_link_setup_82577(hw);
3642 		if (ret_val)
3643 			goto out;
3644 		break;
3645 	case e1000_phy_ife:
3646 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3647 					       &reg_data);
3648 		if (ret_val)
3649 			goto out;
3650 
3651 		reg_data &= ~IFE_PMC_AUTO_MDIX;
3652 
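		/*
		 * hw->phy.mdix: 1 forces MDI, 2 forces MDI-X, anything else
		 * selects automatic crossover.
		 */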
3653 		switch (hw->phy.mdix) {
3654 		case 1:
3655 			reg_data &= ~IFE_PMC_FORCE_MDIX;
3656 			break;
3657 		case 2:
3658 			reg_data |= IFE_PMC_FORCE_MDIX;
3659 			break;
3660 		case 0:
3661 		default:
3662 			reg_data |= IFE_PMC_AUTO_MDIX;
3663 			break;
3664 		}
3665 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3666 						reg_data);
3667 		if (ret_val)
3668 			goto out;
3669 		break;
3670 	default:
3671 		break;
3672 	}
3673 	ret_val = e1000_setup_copper_link_generic(hw);
3674 
3675 out:
3676 	return ret_val;
3677 }
3678 
3679 /**
3680  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3681  *  @hw: pointer to the HW structure
3682  *  @speed: pointer to store current link speed
3683  *  @duplex: pointer to store the current link duplex
3684  *
3685  *  Calls the generic get_speed_and_duplex to retrieve the current link
3686  *  information and then calls the Kumeran lock loss workaround for links at
3687  *  gigabit speeds.
3688  **/
3689 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3690 					  u16 *duplex)
3691 {
3692 	s32 ret_val;
3693 
3694 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3695 
3696 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3697 	if (ret_val)
3698 		goto out;
3699 
3700 	if ((hw->mac.type == e1000_ich8lan) &&
3701 	    (hw->phy.type == e1000_phy_igp_3) &&
3702 	    (*speed == SPEED_1000)) {
3703 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3704 	}
3705 
3706 out:
3707 	return ret_val;
3708 }
3709 
3710 /**
3711  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3712  *  @hw: pointer to the HW structure
3713  *
3714  *  Work-around for 82566 Kumeran PCS lock loss:
3715  *  On a link status change (i.e. PCI reset, speed change), when link is up
3716  *  and the speed is gigabit:
3717  *    0) if the workaround has been disabled, do nothing
3718  *    1) wait 1ms for the Kumeran link to come up
3719  *    2) check the Kumeran Diagnostic register PCS lock loss bit
3720  *    3) if it is not set, the link is locked (all is good); otherwise...
3721  *    4) reset the PHY
3722  *    5) repeat up to 10 times
3723  *  Note: this is only called for IGP3 copper when the speed is 1Gb/s.
3724  **/
3725 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3726 {
3727 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3728 	u32 phy_ctrl;
3729 	s32 ret_val = E1000_SUCCESS;
3730 	u16 i, data;
3731 	bool link;
3732 
3733 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3734 
3735 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3736 		goto out;
3737 
3738 	/*
3739 	 * Make sure link is up before proceeding; if not, just return.
3740 	 * Attempting this while the link is negotiating has been seen to
3741 	 * degrade link stability.
3742 	 */
3743 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3744 	if (!link) {
3745 		ret_val = E1000_SUCCESS;
3746 		goto out;
3747 	}
3748 
3749 	for (i = 0; i < 10; i++) {
3750 		/* read once to clear */
3751 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3752 		if (ret_val)
3753 			goto out;
3754 		/* and again to get new status */
3755 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3756 		if (ret_val)
3757 			goto out;
3758 
3759 		/* check for PCS lock */
3760 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3761 			ret_val = E1000_SUCCESS;
3762 			goto out;
3763 		}
3764 
3765 		/* Issue PHY reset */
3766 		hw->phy.ops.reset(hw);
3767 		msec_delay_irq(5);
3768 	}
3769 	/* Disable GigE link negotiation */
3770 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3771 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3772 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3773 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3774 
3775 	/*
3776 	 * Call gig speed drop workaround on Gig disable before accessing
3777 	 * any PHY registers
3778 	 */
3779 	e1000_gig_downshift_workaround_ich8lan(hw);
3780 
3781 	/* unable to acquire PCS lock */
3782 	ret_val = -E1000_ERR_PHY;
3783 
3784 out:
3785 	return ret_val;
3786 }
3787 
3788 /**
3789  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3790  *  @hw: pointer to the HW structure
3791  *  @state: boolean value used to set the current Kumeran workaround state
3792  *
3793  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE,
3794  *  disabled - FALSE).
3795  **/
3796 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3797 						 bool state)
3798 {
3799 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3800 
3801 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3802 
3803 	if (hw->mac.type != e1000_ich8lan) {
3804 		DEBUGOUT("Workaround applies to ICH8 only.\n");
3805 		return;
3806 	}
3807 
3808 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3809 
3810 	return;
3811 }
3812 
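/*
 * Illustrative sketch only (not part of the upstream shared code): a
 * hypothetical OS-specific power-management hook could use the setter
 * above to disable the lock-loss workaround across a suspend cycle and
 * re-enable it on resume.
 */
#if 0
static void
example_kmrn_workaround_pm_hook(struct e1000_hw *hw, bool suspending)
{
	/* Harmless on non-ICH8 parts; the setter checks mac.type itself. */
	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw,
	    suspending ? FALSE : TRUE);
}
#endif
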
3813 /**
3814  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3815  *  @hw: pointer to the HW structure
3816  *
3817  *  Workaround for 82566 power-down on D3 entry:
3818  *    1) disable gigabit link
3819  *    2) write VR power-down enable
3820  *    3) read it back
3821  *  Continue if successful; otherwise issue an LCD reset and repeat.
3822  **/
3823 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3824 {
3825 	u32 reg;
3826 	u16 data;
3827 	u8  retry = 0;
3828 
3829 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3830 
3831 	if (hw->phy.type != e1000_phy_igp_3)
3832 		goto out;
3833 
3834 	/* Try the workaround twice (if needed) */
3835 	do {
3836 		/* Disable link */
3837 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3838 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3839 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3840 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3841 
3842 		/*
3843 		 * Call gig speed drop workaround on Gig disable before
3844 		 * accessing any PHY registers
3845 		 */
3846 		if (hw->mac.type == e1000_ich8lan)
3847 			e1000_gig_downshift_workaround_ich8lan(hw);
3848 
3849 		/* Write VR power-down enable */
3850 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3851 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3852 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3853 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3854 
3855 		/* Read it back and test */
3856 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3857 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3858 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3859 			break;
3860 
3861 		/* Issue PHY reset and repeat at most one more time */
3862 		reg = E1000_READ_REG(hw, E1000_CTRL);
3863 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3864 		retry++;
3865 	} while (retry);
3866 
3867 out:
3868 	return;
3869 }
3870 
3871 /**
3872  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3873  *  @hw: pointer to the HW structure
3874  *
3875  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3876  *  LPLU, Gig disable, MDIC PHY reset):
3877  *    1) Set Kumeran Near-end loopback
3878  *    2) Clear Kumeran Near-end loopback
3879  *  Should only be called for ICH8[m] devices with any 1G PHY.
3880  **/
3881 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3882 {
3883 	s32 ret_val = E1000_SUCCESS;
3884 	u16 reg_data;
3885 
3886 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3887 
3888 	if ((hw->mac.type != e1000_ich8lan) ||
3889 	    (hw->phy.type == e1000_phy_ife))
3890 		goto out;
3891 
3892 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3893 					      &reg_data);
3894 	if (ret_val)
3895 		goto out;
3896 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3897 	ret_val = e1000_write_kmrn_reg_generic(hw,
3898 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3899 					       reg_data);
3900 	if (ret_val)
3901 		goto out;
3902 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3903 	ret_val = e1000_write_kmrn_reg_generic(hw,
3904 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3905 					       reg_data);
3906 out:
3907 	return;
3908 }
3909 
3910 /**
3911  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3912  *  @hw: pointer to the HW structure
3913  *
3914  *  During S0 to Sx transition, it is possible the link remains at gig
3915  *  instead of negotiating to a lower speed.  Before going to Sx, set
3916  *  'Gig Disable' to force link speed negotiation to a lower speed based on
3917  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
3918  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3919  *  needs to be written.
3920  **/
3921 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3922 {
3923 	u32 phy_ctrl;
3924 	s32 ret_val;
3925 
3926 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
3927 
3928 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3929 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
3930 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3931 	if (hw->mac.type == e1000_ich8lan)
3932 		e1000_gig_downshift_workaround_ich8lan(hw);
3933 
3934 	if (hw->mac.type >= e1000_pchlan) {
3935 		e1000_oem_bits_config_ich8lan(hw, FALSE);
3936 		e1000_phy_hw_reset_ich8lan(hw);
3937 		ret_val = hw->phy.ops.acquire(hw);
3938 		if (ret_val)
3939 			return;
3940 		e1000_write_smbus_addr(hw);
3941 		hw->phy.ops.release(hw);
3942 	}
3943 
3944 	return;
3945 }
3946 
3947 /**
3948  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3949  *  @hw: pointer to the HW structure
3950  *
3951  *  During Sx to S0 transitions on non-managed devices or managed devices
3952  *  on which PHY resets are not blocked, if the PHY registers cannot be
3953  *  accessed properly by the software, toggle the LANPHYPC value to
3954  *  power-cycle the PHY.
3955  **/
3956 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3957 {
3958 	u16 phy_id1, phy_id2;
3959 	s32 ret_val;
3960 
3961 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
3962 
3963 	if ((hw->mac.type != e1000_pch2lan) ||
3964 	    hw->phy.ops.check_reset_block(hw))
3965 		return;
3966 
3967 	ret_val = hw->phy.ops.acquire(hw);
3968 	if (ret_val) {
3969 		DEBUGOUT("Failed to acquire PHY semaphore in resume\n");
3970 		return;
3971 	}
3972 
3973 	/* Test access to the PHY registers by reading the ID regs */
3974 	ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3975 	if (ret_val)
3976 		goto release;
3977 	ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
3978 	if (ret_val)
3979 		goto release;
3980 
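	/*
	 * hw->phy.id holds PHY_ID1 in the upper 16 bits and PHY_ID2 (with
	 * the revision bits masked off) in the lower 16 bits; a match here
	 * means register access is working and no power cycle is needed.
	 */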
3981 	if (hw->phy.id == ((u32)(phy_id1 << 16) |
3982 			   (u32)(phy_id2 & PHY_REVISION_MASK)))
3983 		goto release;
3984 
3985 	e1000_toggle_lanphypc_value_ich8lan(hw);
3986 
3987 	hw->phy.ops.release(hw);
3988 	msec_delay(50);
3989 	hw->phy.ops.reset(hw);
3990 	msec_delay(50);
3991 	return;
3992 
3993 release:
3994 	hw->phy.ops.release(hw);
3995 
3996 	return;
3997 }
3998 
3999 /**
4000  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4001  *  @hw: pointer to the HW structure
4002  *
4003  *  Return the LED back to the default configuration.
4004  **/
4005 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4006 {
4007 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
4008 
4009 	if (hw->phy.type == e1000_phy_ife)
4010 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4011 					     0);
4012 
4013 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4014 	return E1000_SUCCESS;
4015 }
4016 
4017 /**
4018  *  e1000_led_on_ich8lan - Turn LEDs on
4019  *  @hw: pointer to the HW structure
4020  *
4021  *  Turn on the LEDs.
4022  **/
4023 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4024 {
4025 	DEBUGFUNC("e1000_led_on_ich8lan");
4026 
4027 	if (hw->phy.type == e1000_phy_ife)
4028 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4029 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4030 
4031 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4032 	return E1000_SUCCESS;
4033 }
4034 
4035 /**
4036  *  e1000_led_off_ich8lan - Turn LEDs off
4037  *  @hw: pointer to the HW structure
4038  *
4039  *  Turn off the LEDs.
4040  **/
4041 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4042 {
4043 	DEBUGFUNC("e1000_led_off_ich8lan");
4044 
4045 	if (hw->phy.type == e1000_phy_ife)
4046 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4047 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4048 
4049 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4050 	return E1000_SUCCESS;
4051 }
4052 
4053 /**
4054  *  e1000_setup_led_pchlan - Configures SW controllable LED
4055  *  @hw: pointer to the HW structure
4056  *
4057  *  This prepares the SW controllable LED for use.
4058  **/
4059 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4060 {
4061 	DEBUGFUNC("e1000_setup_led_pchlan");
4062 
4063 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4064 				     (u16)hw->mac.ledctl_mode1);
4065 }
4066 
4067 /**
4068  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4069  *  @hw: pointer to the HW structure
4070  *
4071  *  Return the LED back to the default configuration.
4072  **/
4073 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4074 {
4075 	DEBUGFUNC("e1000_cleanup_led_pchlan");
4076 
4077 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4078 				     (u16)hw->mac.ledctl_default);
4079 }
4080 
4081 /**
4082  *  e1000_led_on_pchlan - Turn LEDs on
4083  *  @hw: pointer to the HW structure
4084  *
4085  *  Turn on the LEDs.
4086  **/
4087 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4088 {
4089 	u16 data = (u16)hw->mac.ledctl_mode2;
4090 	u32 i, led;
4091 
4092 	DEBUGFUNC("e1000_led_on_pchlan");
4093 
4094 	/*
4095 	 * If there is no link, turn the LED on by setting the invert bit
4096 	 * for each LED whose mode is "link_up" in ledctl_mode2.
4097 	 */
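	/*
	 * HV_LED_CONFIG packs three LED configurations into consecutive
	 * 5-bit fields, hence the (i * 5) shifts below.
	 */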
4098 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4099 		for (i = 0; i < 3; i++) {
4100 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4101 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4102 			    E1000_LEDCTL_MODE_LINK_UP)
4103 				continue;
4104 			if (led & E1000_PHY_LED0_IVRT)
4105 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4106 			else
4107 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4108 		}
4109 	}
4110 
4111 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4112 }
4113 
4114 /**
4115  *  e1000_led_off_pchlan - Turn LEDs off
4116  *  @hw: pointer to the HW structure
4117  *
4118  *  Turn off the LEDs.
4119  **/
4120 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4121 {
4122 	u16 data = (u16)hw->mac.ledctl_mode1;
4123 	u32 i, led;
4124 
4125 	DEBUGFUNC("e1000_led_off_pchlan");
4126 
4127 	/*
4128 	 * If there is no link, turn the LED off by clearing the invert bit
4129 	 * for each LED whose mode is "link_up" in ledctl_mode1.
4130 	 */
4131 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4132 		for (i = 0; i < 3; i++) {
4133 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4134 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4135 			    E1000_LEDCTL_MODE_LINK_UP)
4136 				continue;
4137 			if (led & E1000_PHY_LED0_IVRT)
4138 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4139 			else
4140 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4141 		}
4142 	}
4143 
4144 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4145 }
4146 
4147 /**
4148  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4149  *  @hw: pointer to the HW structure
4150  *
4151  *  Read appropriate register for the config done bit for completion status
4152  *  and configure the PHY through s/w for EEPROM-less parts.
4153  *
4154  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
4155  *  config done bit, so only an error is logged and init continues.  If we were
4156  *  to return with an error, EEPROM-less silicon would not be able to be reset
4157  *  or change link.
4158  **/
4159 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4160 {
4161 	s32 ret_val = E1000_SUCCESS;
4162 	u32 bank = 0;
4163 	u32 status;
4164 
4165 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4166 
4167 	e1000_get_cfg_done_generic(hw);
4168 
4169 	/* Wait for indication from h/w that it has completed basic config */
4170 	if (hw->mac.type >= e1000_ich10lan) {
4171 		e1000_lan_init_done_ich8lan(hw);
4172 	} else {
4173 		ret_val = e1000_get_auto_rd_done_generic(hw);
4174 		if (ret_val) {
4175 			/*
4176 			 * When the auto config read does not complete, do not
4177 			 * return an error.  This can happen when there is no
4178 			 * EEPROM, and returning an error would prevent link.
4179 			 */
4180 			DEBUGOUT("Auto Read Done did not complete\n");
4181 			ret_val = E1000_SUCCESS;
4182 		}
4183 	}
4184 
4185 	/* Clear PHY Reset Asserted bit */
4186 	status = E1000_READ_REG(hw, E1000_STATUS);
4187 	if (status & E1000_STATUS_PHYRA)
4188 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4189 	else
4190 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4191 
4192 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4193 	if (hw->mac.type <= e1000_ich9lan) {
4194 		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4195 		    (hw->phy.type == e1000_phy_igp_3)) {
4196 			e1000_phy_init_script_igp3(hw);
4197 		}
4198 	} else {
4199 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4200 			/* Maybe we should do a basic PHY config */
4201 			DEBUGOUT("EEPROM not present\n");
4202 			ret_val = -E1000_ERR_CONFIG;
4203 		}
4204 	}
4205 
4206 	return ret_val;
4207 }
4208 
4209 /**
4210  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4211  * @hw: pointer to the HW structure
4212  *
4213  * In the case of a PHY power down to save power, or to turn off link during
4214  * a driver unload, remove the link if wake on LAN is not enabled.
4215  **/
4216 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4217 {
4218 	/* If the management interface is not enabled, then power down */
4219 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4220 	      hw->phy.ops.check_reset_block(hw)))
4221 		e1000_power_down_phy_copper(hw);
4222 
4223 	return;
4224 }
4225 
4226 /**
4227  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4228  *  @hw: pointer to the HW structure
4229  *
4230  *  Clears hardware counters specific to the silicon family and calls
4231  *  clear_hw_cntrs_generic to clear all general purpose counters.
4232  **/
4233 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4234 {
4235 	u16 phy_data;
4236 	s32 ret_val;
4237 
4238 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4239 
4240 	e1000_clear_hw_cntrs_base_generic(hw);
4241 
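	/* The counters below are clear-on-read; reading them zeroes them. */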
4242 	E1000_READ_REG(hw, E1000_ALGNERRC);
4243 	E1000_READ_REG(hw, E1000_RXERRC);
4244 	E1000_READ_REG(hw, E1000_TNCRS);
4245 	E1000_READ_REG(hw, E1000_CEXTERR);
4246 	E1000_READ_REG(hw, E1000_TSCTC);
4247 	E1000_READ_REG(hw, E1000_TSCTFC);
4248 
4249 	E1000_READ_REG(hw, E1000_MGTPRC);
4250 	E1000_READ_REG(hw, E1000_MGTPDC);
4251 	E1000_READ_REG(hw, E1000_MGTPTC);
4252 
4253 	E1000_READ_REG(hw, E1000_IAC);
4254 	E1000_READ_REG(hw, E1000_ICRXOC);
4255 
4256 	/* Clear PHY statistics registers */
4257 	if ((hw->phy.type == e1000_phy_82578) ||
4258 	    (hw->phy.type == e1000_phy_82579) ||
4259 	    (hw->phy.type == e1000_phy_82577)) {
4260 		ret_val = hw->phy.ops.acquire(hw);
4261 		if (ret_val)
4262 			return;
4263 		ret_val = hw->phy.ops.set_page(hw,
4264 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
4265 		if (ret_val)
4266 			goto release;
4267 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4268 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4269 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4270 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4271 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4272 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4273 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4274 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4275 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4276 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4277 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4278 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4279 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4280 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4281 release:
4282 		hw->phy.ops.release(hw);
4283 	}
4284 }
4285 
4286