xref: /freebsd/sys/dev/e1000/e1000_ich8lan.c (revision a3cf0ef5a295c885c895fabfd56470c0d1db322d)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2010, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 /*
36  * 82562G 10/100 Network Connection
37  * 82562G-2 10/100 Network Connection
38  * 82562GT 10/100 Network Connection
39  * 82562GT-2 10/100 Network Connection
40  * 82562V 10/100 Network Connection
41  * 82562V-2 10/100 Network Connection
42  * 82566DC-2 Gigabit Network Connection
43  * 82566DC Gigabit Network Connection
44  * 82566DM-2 Gigabit Network Connection
45  * 82566DM Gigabit Network Connection
46  * 82566MC Gigabit Network Connection
47  * 82566MM Gigabit Network Connection
48  * 82567LM Gigabit Network Connection
49  * 82567LF Gigabit Network Connection
50  * 82567V Gigabit Network Connection
51  * 82567LM-2 Gigabit Network Connection
52  * 82567LF-2 Gigabit Network Connection
53  * 82567V-2 Gigabit Network Connection
54  * 82567LF-3 Gigabit Network Connection
55  * 82567LM-3 Gigabit Network Connection
56  * 82567LM-4 Gigabit Network Connection
57  * 82577LM Gigabit Network Connection
58  * 82577LC Gigabit Network Connection
59  * 82578DM Gigabit Network Connection
60  * 82578DC Gigabit Network Connection
61  * 82579LM Gigabit Network Connection
62  * 82579V Gigabit Network Connection
63  */
64 
65 #include "e1000_api.h"
66 
67 static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
79 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
80 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
81 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
82                                             bool active);
83 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
84                                             bool active);
85 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
86                                    u16 words, u16 *data);
87 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
88                                     u16 words, u16 *data);
89 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
90 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
91 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
92                                             u16 *data);
93 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
94 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
95 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
96 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
97 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
98 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
99 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
100                                            u16 *speed, u16 *duplex);
101 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
102 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
103 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
104 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
105 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
106 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
107 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
108 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
109 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
111 static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
112 static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
113 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
114 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
115 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
116                                           u32 offset, u8 *data);
117 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
118                                           u8 size, u16 *data);
119 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
120                                           u32 offset, u16 *data);
121 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
122                                                  u32 offset, u8 byte);
123 static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
124                                            u32 offset, u8 data);
125 static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
126                                            u8 size, u16 data);
127 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
128 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
129 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
130 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
131 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
133 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
134 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
135 
136 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
137 /* Offset 04h HSFSTS */
138 union ich8_hws_flash_status {
139 	struct ich8_hsfsts {
140 		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
141 		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
142 		u16 dael       :1; /* bit 2 Direct Access error Log */
143 		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
144 		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
145 		u16 reserved1  :2; /* bit 7:6 Reserved */
146 		u16 reserved2  :6; /* bit 13:8 Reserved */
147 		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
148 		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
149 	} hsf_status;
150 	u16 regval;
151 };
152 
153 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
154 /* Offset 06h FLCTL */
155 union ich8_hws_flash_ctrl {
156 	struct ich8_hsflctl {
157 		u16 flcgo      :1;   /* 0 Flash Cycle Go */
158 		u16 flcycle    :2;   /* 2:1 Flash Cycle */
159 		u16 reserved   :5;   /* 7:3 Reserved  */
160 		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
161 		u16 flockdn    :6;   /* 15:10 Reserved */
162 	} hsf_ctrl;
163 	u16 regval;
164 };
165 
166 /* ICH Flash Region Access Permissions */
167 union ich8_hws_flash_regacc {
168 	struct ich8_flracc {
169 		u32 grra      :8; /* 0:7 GbE region Read Access */
170 		u32 grwa      :8; /* 8:15 GbE region Write Access */
171 		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
172 		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
173 	} hsf_flregacc;
174 	u16 regval;
175 };
176 
177 /**
178  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
179  *  @hw: pointer to the HW structure
180  *
181  *  Initialize family-specific PHY parameters and function pointers.
182  **/
183 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
184 {
185 	struct e1000_phy_info *phy = &hw->phy;
186 	u32 ctrl, fwsm;
187 	s32 ret_val = E1000_SUCCESS;
188 
189 	DEBUGFUNC("e1000_init_phy_params_pchlan");
190 
191 	phy->addr                     = 1;
192 	phy->reset_delay_us           = 100;
193 
194 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
195 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
196 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
197 	phy->ops.read_reg             = e1000_read_phy_reg_hv;
198 	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
199 	phy->ops.release              = e1000_release_swflag_ich8lan;
200 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
201 	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
202 	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
203 	phy->ops.write_reg            = e1000_write_phy_reg_hv;
204 	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
205 	phy->ops.power_up             = e1000_power_up_phy_copper;
206 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
207 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
208 
209 	/*
210 	 * The MAC-PHY interconnect may still be in SMBus mode
211 	 * after Sx->S0.  If the manageability engine (ME) is
212 	 * disabled, then toggle the LANPHYPC Value bit to force
213 	 * the interconnect to PCIe mode.
214 	 */
215 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
216 	if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
217 		ctrl = E1000_READ_REG(hw, E1000_CTRL);
218 		ctrl |=  E1000_CTRL_LANPHYPC_OVERRIDE;
219 		ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
220 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
221 		usec_delay(10);
222 		ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
223 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
224 		msec_delay(50);
225 
226 		/*
227 		 * Gate automatic PHY configuration by hardware on
228 		 * non-managed 82579
229 		 */
230 		if (hw->mac.type == e1000_pch2lan)
231 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
232 	}
233 
234 	/*
235 	 * Reset the PHY before any access to it.  Doing so ensures that
236 	 * the PHY is in a known good state before we read/write PHY registers.
237 	 * The generic reset is sufficient here, because we haven't determined
238 	 * the PHY type yet.
239 	 */
240 	ret_val = e1000_phy_hw_reset_generic(hw);
241 	if (ret_val)
242 		goto out;
243 
244 	/* Ungate automatic PHY configuration on non-managed 82579 */
245 	if ((hw->mac.type == e1000_pch2lan)  &&
246 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
247 		msec_delay(10);
248 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
249 	}
250 
251 	phy->id = e1000_phy_unknown;
252 	switch (hw->mac.type) {
253 	default:
254 		ret_val = e1000_get_phy_id(hw);
255 		if (ret_val)
256 			goto out;
257 		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
258 			break;
259 		/* fall-through */
260 	case e1000_pch2lan:
261 		/*
262 		 * In case the PHY needs to be in mdio slow mode,
263 		 * set slow mode and try to get the PHY id again.
264 		 */
265 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
266 		if (ret_val)
267 			goto out;
268 		ret_val = e1000_get_phy_id(hw);
269 		if (ret_val)
270 			goto out;
271 		break;
272 	}
273 	phy->type = e1000_get_phy_type_from_id(phy->id);
274 
275 	switch (phy->type) {
276 	case e1000_phy_82577:
277 	case e1000_phy_82579:
278 		phy->ops.check_polarity = e1000_check_polarity_82577;
279 		phy->ops.force_speed_duplex =
280 			e1000_phy_force_speed_duplex_82577;
281 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
282 		phy->ops.get_info = e1000_get_phy_info_82577;
283 		phy->ops.commit = e1000_phy_sw_reset_generic;
284 		break;
285 	case e1000_phy_82578:
286 		phy->ops.check_polarity = e1000_check_polarity_m88;
287 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
288 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
289 		phy->ops.get_info = e1000_get_phy_info_m88;
290 		break;
291 	default:
292 		ret_val = -E1000_ERR_PHY;
293 		break;
294 	}
295 
296 out:
297 	return ret_val;
298 }
299 
300 /**
301  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
302  *  @hw: pointer to the HW structure
303  *
304  *  Initialize family-specific PHY parameters and function pointers.
305  **/
306 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
307 {
308 	struct e1000_phy_info *phy = &hw->phy;
309 	s32 ret_val = E1000_SUCCESS;
310 	u16 i = 0;
311 
312 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
313 
314 	phy->addr                     = 1;
315 	phy->reset_delay_us           = 100;
316 
317 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
318 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
319 	phy->ops.get_cable_length     = e1000_get_cable_length_igp_2;
320 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
321 	phy->ops.read_reg             = e1000_read_phy_reg_igp;
322 	phy->ops.release              = e1000_release_swflag_ich8lan;
323 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
324 	phy->ops.set_d0_lplu_state    = e1000_set_d0_lplu_state_ich8lan;
325 	phy->ops.set_d3_lplu_state    = e1000_set_d3_lplu_state_ich8lan;
326 	phy->ops.write_reg            = e1000_write_phy_reg_igp;
327 	phy->ops.power_up             = e1000_power_up_phy_copper;
328 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
329 
330 	/*
331 	 * We may need to do this twice - once for IGP and if that fails,
332 	 * we'll set BM func pointers and try again
333 	 */
334 	ret_val = e1000_determine_phy_address(hw);
335 	if (ret_val) {
336 		phy->ops.write_reg = e1000_write_phy_reg_bm;
337 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
338 		ret_val = e1000_determine_phy_address(hw);
339 		if (ret_val) {
340 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
341 			goto out;
342 		}
343 	}
344 
345 	phy->id = 0;
346 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
347 	       (i++ < 100)) {
348 		msec_delay(1);
349 		ret_val = e1000_get_phy_id(hw);
350 		if (ret_val)
351 			goto out;
352 	}
353 
354 	/* Verify phy id */
355 	switch (phy->id) {
356 	case IGP03E1000_E_PHY_ID:
357 		phy->type = e1000_phy_igp_3;
358 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
359 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
360 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
361 		phy->ops.get_info = e1000_get_phy_info_igp;
362 		phy->ops.check_polarity = e1000_check_polarity_igp;
363 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
364 		break;
365 	case IFE_E_PHY_ID:
366 	case IFE_PLUS_E_PHY_ID:
367 	case IFE_C_E_PHY_ID:
368 		phy->type = e1000_phy_ife;
369 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
370 		phy->ops.get_info = e1000_get_phy_info_ife;
371 		phy->ops.check_polarity = e1000_check_polarity_ife;
372 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
373 		break;
374 	case BME1000_E_PHY_ID:
375 		phy->type = e1000_phy_bm;
376 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
377 		phy->ops.read_reg = e1000_read_phy_reg_bm;
378 		phy->ops.write_reg = e1000_write_phy_reg_bm;
379 		phy->ops.commit = e1000_phy_sw_reset_generic;
380 		phy->ops.get_info = e1000_get_phy_info_m88;
381 		phy->ops.check_polarity = e1000_check_polarity_m88;
382 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
383 		break;
384 	default:
385 		ret_val = -E1000_ERR_PHY;
386 		goto out;
387 	}
388 
389 out:
390 	return ret_val;
391 }
392 
393 /**
394  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
395  *  @hw: pointer to the HW structure
396  *
397  *  Initialize family-specific NVM parameters and function
398  *  pointers.
399  **/
400 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
401 {
402 	struct e1000_nvm_info *nvm = &hw->nvm;
403 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
404 	u32 gfpreg, sector_base_addr, sector_end_addr;
405 	s32 ret_val = E1000_SUCCESS;
406 	u16 i;
407 
408 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
409 
410 	/* Can't read flash registers if the register set isn't mapped. */
411 	if (!hw->flash_address) {
412 		DEBUGOUT("ERROR: Flash registers not mapped\n");
413 		ret_val = -E1000_ERR_CONFIG;
414 		goto out;
415 	}
416 
417 	nvm->type = e1000_nvm_flash_sw;
418 
419 	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
420 
421 	/*
422 	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
423 	 * Add 1 to sector_end_addr since this sector is included in
424 	 * the overall size.
425 	 */
426 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
427 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
428 
429 	/* flash_base_addr is byte-aligned */
430 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
431 
432 	/*
433 	 * find total size of the NVM, then cut in half since the total
434 	 * size represents two separate NVM banks.
435 	 */
436 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
437 	                          << FLASH_SECTOR_ADDR_SHIFT;
438 	nvm->flash_bank_size /= 2;
439 	/* Adjust to word count */
440 	nvm->flash_bank_size /= sizeof(u16);
441 
442 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
443 
444 	/* Clear shadow ram */
445 	for (i = 0; i < nvm->word_size; i++) {
446 		dev_spec->shadow_ram[i].modified = FALSE;
447 		dev_spec->shadow_ram[i].value    = 0xFFFF;
448 	}
449 
450 	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
451 	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
452 
453 	/* Function Pointers */
454 	nvm->ops.acquire       = e1000_acquire_nvm_ich8lan;
455 	nvm->ops.release       = e1000_release_nvm_ich8lan;
456 	nvm->ops.read          = e1000_read_nvm_ich8lan;
457 	nvm->ops.update        = e1000_update_nvm_checksum_ich8lan;
458 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
459 	nvm->ops.validate      = e1000_validate_nvm_checksum_ich8lan;
460 	nvm->ops.write         = e1000_write_nvm_ich8lan;
461 
462 out:
463 	return ret_val;
464 }
465 
466 /**
467  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
468  *  @hw: pointer to the HW structure
469  *
470  *  Initialize family-specific MAC parameters and function
471  *  pointers.
472  **/
473 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
474 {
475 	struct e1000_mac_info *mac = &hw->mac;
476 	u16 pci_cfg;
477 
478 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
479 
480 	/* Set media type function pointer */
481 	hw->phy.media_type = e1000_media_type_copper;
482 
483 	/* Set mta register count */
484 	mac->mta_reg_count = 32;
485 	/* Set rar entry count */
486 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
487 	if (mac->type == e1000_ich8lan)
488 		mac->rar_entry_count--;
489 	/* Set if part includes ASF firmware */
490 	mac->asf_firmware_present = TRUE;
491 	/* FWSM register */
492 	mac->has_fwsm = TRUE;
493 	/* ARC subsystem not supported */
494 	mac->arc_subsystem_valid = FALSE;
495 	/* Adaptive IFS supported */
496 	mac->adaptive_ifs = TRUE;
497 
498 	/* Function pointers */
499 
500 	/* bus type/speed/width */
501 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
502 	/* function id */
503 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
504 	/* reset */
505 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
506 	/* hw initialization */
507 	mac->ops.init_hw = e1000_init_hw_ich8lan;
508 	/* link setup */
509 	mac->ops.setup_link = e1000_setup_link_ich8lan;
510 	/* physical interface setup */
511 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
512 	/* check for link */
513 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
514 	/* link info */
515 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
516 	/* multicast address update */
517 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
518 	/* clear hardware counters */
519 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
520 
521 	/* LED operations */
522 	switch (mac->type) {
523 	case e1000_ich8lan:
524 	case e1000_ich9lan:
525 	case e1000_ich10lan:
526 		/* check management mode */
527 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
528 		/* ID LED init */
529 		mac->ops.id_led_init = e1000_id_led_init_generic;
530 		/* blink LED */
531 		mac->ops.blink_led = e1000_blink_led_generic;
532 		/* setup LED */
533 		mac->ops.setup_led = e1000_setup_led_generic;
534 		/* cleanup LED */
535 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
536 		/* turn on/off LED */
537 		mac->ops.led_on = e1000_led_on_ich8lan;
538 		mac->ops.led_off = e1000_led_off_ich8lan;
539 		break;
540 	case e1000_pch2lan:
541 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
542 		mac->ops.rar_set = e1000_rar_set_pch2lan;
543 		/* fall-through */
544 	case e1000_pchlan:
545 		/* save PCH revision_id */
546 		e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
547 		hw->revision_id = (u8)(pci_cfg & 0x000F);
548 		/* check management mode */
549 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
550 		/* ID LED init */
551 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
552 		/* setup LED */
553 		mac->ops.setup_led = e1000_setup_led_pchlan;
554 		/* cleanup LED */
555 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
556 		/* turn on/off LED */
557 		mac->ops.led_on = e1000_led_on_pchlan;
558 		mac->ops.led_off = e1000_led_off_pchlan;
559 		break;
560 	default:
561 		break;
562 	}
563 
564 	/* Enable PCS Lock-loss workaround for ICH8 */
565 	if (mac->type == e1000_ich8lan)
566 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
567 
568 	/* Gate automatic PHY configuration by hardware on managed 82579 */
569 	if ((mac->type == e1000_pch2lan) &&
570 	    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
571 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
572 
573 	return E1000_SUCCESS;
574 }
575 
576 /**
577  *  e1000_set_eee_pchlan - Enable/disable EEE support
578  *  @hw: pointer to the HW structure
579  *
580  *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
581  *  the LPI Control register will remain set only if/when link is up.
582  **/
583 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
584 {
585 	s32 ret_val = E1000_SUCCESS;
586 	u16 phy_reg;
587 
588 	DEBUGFUNC("e1000_set_eee_pchlan");
589 
590 	if (hw->phy.type != e1000_phy_82579)
591 		goto out;
592 
593 	ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
594 	if (ret_val)
595 		goto out;
596 
597 	if (hw->dev_spec.ich8lan.eee_disable)
598 		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
599 	else
600 		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
601 
602 	ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
603 out:
604 	return ret_val;
605 }
606 
607 /**
608  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
609  *  @hw: pointer to the HW structure
610  *
611  *  Checks to see if the link status of the hardware has changed.  If a
612  *  change in link status has been detected, then we read the PHY registers
613  *  to get the current speed/duplex if link exists.
614  **/
615 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
616 {
617 	struct e1000_mac_info *mac = &hw->mac;
618 	s32 ret_val;
619 	bool link;
620 
621 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
622 
623 	/*
624 	 * We only want to go out to the PHY registers to see if Auto-Neg
625 	 * has completed and/or if our link status has changed.  The
626 	 * get_link_status flag is set upon receiving a Link Status
627 	 * Change or Rx Sequence Error interrupt.
628 	 */
629 	if (!mac->get_link_status) {
630 		ret_val = E1000_SUCCESS;
631 		goto out;
632 	}
633 
634 	/*
635 	 * First we want to see if the MII Status Register reports
636 	 * link.  If so, then we want to get the current speed/duplex
637 	 * of the PHY.
638 	 */
639 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
640 	if (ret_val)
641 		goto out;
642 
643 	if (hw->mac.type == e1000_pchlan) {
644 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
645 		if (ret_val)
646 			goto out;
647 	}
648 
649 	if (!link)
650 		goto out; /* No link detected */
651 
652 	mac->get_link_status = FALSE;
653 
654 	if (hw->phy.type == e1000_phy_82578) {
655 		ret_val = e1000_link_stall_workaround_hv(hw);
656 		if (ret_val)
657 			goto out;
658 	}
659 
660 	if (hw->mac.type == e1000_pch2lan) {
661 		ret_val = e1000_k1_workaround_lv(hw);
662 		if (ret_val)
663 			goto out;
664 	}
665 
666 	/*
667 	 * Check if there was DownShift, must be checked
668 	 * immediately after link-up
669 	 */
670 	e1000_check_downshift_generic(hw);
671 
672 	/* Enable/Disable EEE after link up */
673 	ret_val = e1000_set_eee_pchlan(hw);
674 	if (ret_val)
675 		goto out;
676 
677 	/*
678 	 * If we are forcing speed/duplex, then we simply return since
679 	 * we have already determined whether we have link or not.
680 	 */
681 	if (!mac->autoneg) {
682 		ret_val = -E1000_ERR_CONFIG;
683 		goto out;
684 	}
685 
686 	/*
687 	 * Auto-Neg is enabled.  Auto Speed Detection takes care
688 	 * of MAC speed/duplex configuration.  So we only need to
689 	 * configure Collision Distance in the MAC.
690 	 */
691 	e1000_config_collision_dist_generic(hw);
692 
693 	/*
694 	 * Configure Flow Control now that Auto-Neg has completed.
695 	 * First, we need to restore the desired flow control
696 	 * settings because we may have had to re-autoneg with a
697 	 * different link partner.
698 	 */
699 	ret_val = e1000_config_fc_after_link_up_generic(hw);
700 	if (ret_val)
701 		DEBUGOUT("Error configuring flow control\n");
702 
703 out:
704 	return ret_val;
705 }
706 
707 /**
708  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
709  *  @hw: pointer to the HW structure
710  *
711  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
712  **/
713 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
714 {
715 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
716 
717 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
718 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
719 	switch (hw->mac.type) {
720 	case e1000_ich8lan:
721 	case e1000_ich9lan:
722 	case e1000_ich10lan:
723 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
724 		break;
725 	case e1000_pchlan:
726 	case e1000_pch2lan:
727 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
728 		break;
729 	default:
730 		break;
731 	}
732 }
733 
734 /**
735  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
736  *  @hw: pointer to the HW structure
737  *
738  *  Acquires the mutex for performing NVM operations.
739  **/
740 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
741 {
742 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
743 
744 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
745 
746 	return E1000_SUCCESS;
747 }
748 
749 /**
750  *  e1000_release_nvm_ich8lan - Release NVM mutex
751  *  @hw: pointer to the HW structure
752  *
753  *  Releases the mutex used while performing NVM operations.
754  **/
755 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
756 {
757 	DEBUGFUNC("e1000_release_nvm_ich8lan");
758 
759 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
760 
761 	return;
762 }
763 
764 /**
765  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
766  *  @hw: pointer to the HW structure
767  *
768  *  Acquires the software control flag for performing PHY and select
769  *  MAC CSR accesses.
770  **/
771 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
772 {
773 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
774 	s32 ret_val = E1000_SUCCESS;
775 
776 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
777 
778 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
779 
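	/* Wait for any current owner (SW/FW/HW) to release the software flag */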
780 	while (timeout) {
781 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
782 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
783 			break;
784 
785 		msec_delay_irq(1);
786 		timeout--;
787 	}
788 
789 	if (!timeout) {
790 		DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
791 		ret_val = -E1000_ERR_CONFIG;
792 		goto out;
793 	}
794 
795 	timeout = SW_FLAG_TIMEOUT;
796 
797 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
798 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
799 
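	/* Verify the flag reads back as set before claiming ownership */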
800 	while (timeout) {
801 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
802 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
803 			break;
804 
805 		msec_delay_irq(1);
806 		timeout--;
807 	}
808 
809 	if (!timeout) {
810 		DEBUGOUT("Failed to acquire the semaphore.\n");
811 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
812 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
813 		ret_val = -E1000_ERR_CONFIG;
814 		goto out;
815 	}
816 
817 out:
818 	if (ret_val)
819 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
820 
821 	return ret_val;
822 }
823 
824 /**
825  *  e1000_release_swflag_ich8lan - Release software control flag
826  *  @hw: pointer to the HW structure
827  *
828  *  Releases the software control flag for performing PHY and select
829  *  MAC CSR accesses.
830  **/
831 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
832 {
833 	u32 extcnf_ctrl;
834 
835 	DEBUGFUNC("e1000_release_swflag_ich8lan");
836 
837 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
838 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
839 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
840 
841 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
842 
843 	return;
844 }
845 
846 /**
847  *  e1000_check_mng_mode_ich8lan - Checks management mode
848  *  @hw: pointer to the HW structure
849  *
850  *  This checks if the adapter has any manageability enabled.
851  *  This is a function pointer entry point only called by read/write
852  *  routines for the PHY and NVM parts.
853  **/
854 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
855 {
856 	u32 fwsm;
857 
858 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
859 
860 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
861 
862 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
863 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
864 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
865 }
866 
867 /**
868  *  e1000_check_mng_mode_pchlan - Checks management mode
869  *  @hw: pointer to the HW structure
870  *
871  *  This checks if the adapter has iAMT enabled.
872  *  This is a function pointer entry point only called by read/write
873  *  routines for the PHY and NVM parts.
874  **/
875 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
876 {
877 	u32 fwsm;
878 
879 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
880 
881 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
882 
883 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
884 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
885 }
886 
887 /**
888  *  e1000_rar_set_pch2lan - Set receive address register
889  *  @hw: pointer to the HW structure
890  *  @addr: pointer to the receive address
891  *  @index: receive address array register
892  *
893  *  Sets the receive address array register at index to the address passed
894  *  in by addr.  For 82579, RAR[0] is the base address register that is to
895  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
896  *  Use SHRA[0-3] in place of those reserved for ME.
897  **/
898 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
899 {
900 	u32 rar_low, rar_high;
901 
902 	DEBUGFUNC("e1000_rar_set_pch2lan");
903 
904 	/*
905 	 * HW expects these in little endian so we reverse the byte order
906 	 * from network order (big endian) to little endian
907 	 */
908 	rar_low = ((u32) addr[0] |
909 	           ((u32) addr[1] << 8) |
910 	           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
911 
912 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
913 
914 	/* If MAC address zero, no need to set the AV bit */
915 	if (rar_low || rar_high)
916 		rar_high |= E1000_RAH_AV;
917 
918 	if (index == 0) {
919 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
920 		E1000_WRITE_FLUSH(hw);
921 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
922 		E1000_WRITE_FLUSH(hw);
923 		return;
924 	}
925 
926 	if (index < hw->mac.rar_entry_count) {
927 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
928 		E1000_WRITE_FLUSH(hw);
929 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
930 		E1000_WRITE_FLUSH(hw);
931 
932 		/* verify the register updates */
933 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
934 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
935 			return;
936 
937 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
938 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
939 	}
940 
941 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
942 }
943 
944 /**
945  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
946  *  @hw: pointer to the HW structure
947  *
948  *  Checks if firmware is blocking the reset of the PHY.
949  *  This is a function pointer entry point only called by
950  *  reset routines.
951  **/
952 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
953 {
954 	u32 fwsm;
955 
956 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
957 
958 	if (hw->phy.reset_disable)
959 		return E1000_BLK_PHY_RESET;
960 
961 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
962 
963 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
964 	                                        : E1000_BLK_PHY_RESET;
965 }
966 
967 /**
968  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
969  *  @hw: pointer to the HW structure
970  *
971  *  Assumes semaphore already acquired.
972  *
973  **/
974 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
975 {
976 	u16 phy_data;
977 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
978 	s32 ret_val = E1000_SUCCESS;
979 
980 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
981 
982 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
983 	if (ret_val)
984 		goto out;
985 
986 	phy_data &= ~HV_SMB_ADDR_MASK;
987 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
988 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
989 	ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
990 
991 out:
992 	return ret_val;
993 }
994 
995 /**
996  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
997  *  @hw:   pointer to the HW structure
998  *
999  *  SW should configure the LCD (LAN Connected Device, i.e. the PHY) from the
1000  *  NVM extended configuration region as a workaround for certain parts.
1001  **/
1002 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1003 {
1004 	struct e1000_phy_info *phy = &hw->phy;
1005 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1006 	s32 ret_val = E1000_SUCCESS;
1007 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1008 
1009 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1010 
1011 	/*
1012 	 * Initialize the PHY from the NVM on ICH platforms.  This
1013 	 * is needed due to an issue where the NVM configuration is
1014 	 * not properly autoloaded after power transitions.
1015 	 * Therefore, after each PHY reset, we will load the
1016 	 * configuration data out of the NVM manually.
1017 	 */
1018 	switch (hw->mac.type) {
1019 	case e1000_ich8lan:
1020 		if (phy->type != e1000_phy_igp_3)
1021 			return ret_val;
1022 
1023 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1024 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1025 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1026 			break;
1027 		}
1028 		/* Fall-through */
1029 	case e1000_pchlan:
1030 	case e1000_pch2lan:
1031 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1032 		break;
1033 	default:
1034 		return ret_val;
1035 	}
1036 
1037 	ret_val = hw->phy.ops.acquire(hw);
1038 	if (ret_val)
1039 		return ret_val;
1040 
1041 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1042 	if (!(data & sw_cfg_mask))
1043 		goto out;
1044 
1045 	/*
1046 	 * Make sure HW does not configure LCD from PHY
1047 	 * extended configuration before SW configuration
1048 	 */
1049 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1050 	if (hw->mac.type != e1000_pch2lan) {
1051 		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1052 			goto out;
1053 	}
1054 
1055 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1056 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1057 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1058 	if (!cnf_size)
1059 		goto out;
1060 
1061 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1062 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1063 
1064 	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1065 	    (hw->mac.type == e1000_pchlan)) ||
1066 	     (hw->mac.type == e1000_pch2lan)) {
1067 		/*
1068 		 * HW configures the SMBus address and LEDs when the
1069 		 * OEM and LCD Write Enable bits are set in the NVM.
1070 		 * When both NVM bits are cleared, SW will configure
1071 		 * them instead.
1072 		 */
1073 		ret_val = e1000_write_smbus_addr(hw);
1074 		if (ret_val)
1075 			goto out;
1076 
1077 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1078 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1079 							(u16)data);
1080 		if (ret_val)
1081 			goto out;
1082 	}
1083 
1084 	/* Configure LCD from extended configuration region. */
1085 
1086 	/* cnf_base_addr is in DWORD */
1087 	word_addr = (u16)(cnf_base_addr << 1);
1088 
1089 	for (i = 0; i < cnf_size; i++) {
1090 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1091 					   &reg_data);
1092 		if (ret_val)
1093 			goto out;
1094 
1095 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1096 					   1, &reg_addr);
1097 		if (ret_val)
1098 			goto out;
1099 
1100 		/* Save off the PHY page for future writes. */
1101 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1102 			phy_page = reg_data;
1103 			continue;
1104 		}
1105 
1106 		reg_addr &= PHY_REG_MASK;
1107 		reg_addr |= phy_page;
1108 
1109 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1110 						    reg_data);
1111 		if (ret_val)
1112 			goto out;
1113 	}
1114 
1115 out:
1116 	hw->phy.ops.release(hw);
1117 	return ret_val;
1118 }
1119 
1120 /**
1121  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1122  *  @hw:   pointer to the HW structure
1123  *  @link: link up bool flag
1124  *
1125  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1126  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1127  *  If link is down, the function will restore the default K1 setting located
1128  *  in the NVM.
1129  **/
1130 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1131 {
1132 	s32 ret_val = E1000_SUCCESS;
1133 	u16 status_reg = 0;
1134 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1135 
1136 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1137 
1138 	if (hw->mac.type != e1000_pchlan)
1139 		goto out;
1140 
1141 	/* Wrap the whole flow with the sw flag */
1142 	ret_val = hw->phy.ops.acquire(hw);
1143 	if (ret_val)
1144 		goto out;
1145 
1146 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1147 	if (link) {
1148 		if (hw->phy.type == e1000_phy_82578) {
1149 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1150 			                                      &status_reg);
1151 			if (ret_val)
1152 				goto release;
1153 
1154 			status_reg &= BM_CS_STATUS_LINK_UP |
1155 			              BM_CS_STATUS_RESOLVED |
1156 			              BM_CS_STATUS_SPEED_MASK;
1157 
1158 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1159 			                   BM_CS_STATUS_RESOLVED |
1160 			                   BM_CS_STATUS_SPEED_1000))
1161 				k1_enable = FALSE;
1162 		}
1163 
1164 		if (hw->phy.type == e1000_phy_82577) {
1165 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1166 			                                      &status_reg);
1167 			if (ret_val)
1168 				goto release;
1169 
1170 			status_reg &= HV_M_STATUS_LINK_UP |
1171 			              HV_M_STATUS_AUTONEG_COMPLETE |
1172 			              HV_M_STATUS_SPEED_MASK;
1173 
1174 			if (status_reg == (HV_M_STATUS_LINK_UP |
1175 			                   HV_M_STATUS_AUTONEG_COMPLETE |
1176 			                   HV_M_STATUS_SPEED_1000))
1177 				k1_enable = FALSE;
1178 		}
1179 
1180 		/* Link stall fix for link up */
1181 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1182 		                                       0x0100);
1183 		if (ret_val)
1184 			goto release;
1185 
1186 	} else {
1187 		/* Link stall fix for link down */
1188 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1189 		                                       0x4100);
1190 		if (ret_val)
1191 			goto release;
1192 	}
1193 
1194 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1195 
1196 release:
1197 	hw->phy.ops.release(hw);
1198 out:
1199 	return ret_val;
1200 }
1201 
1202 /**
1203  *  e1000_configure_k1_ich8lan - Configure K1 power state
1204  *  @hw: pointer to the HW structure
1205  *  @k1_enable: K1 state to configure
1206  *
1207  *  Configure the K1 power state based on the provided parameter.
1208  *  Assumes semaphore already acquired.
1209  *
1210  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1211  **/
1212 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1213 {
1214 	s32 ret_val = E1000_SUCCESS;
1215 	u32 ctrl_reg = 0;
1216 	u32 ctrl_ext = 0;
1217 	u32 reg = 0;
1218 	u16 kmrn_reg = 0;
1219 
1220 	DEBUGFUNC("e1000_configure_k1_ich8lan");
1221 
1222 	ret_val = e1000_read_kmrn_reg_locked(hw,
1223 	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
1224 	                                     &kmrn_reg);
1225 	if (ret_val)
1226 		goto out;
1227 
1228 	if (k1_enable)
1229 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1230 	else
1231 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1232 
1233 	ret_val = e1000_write_kmrn_reg_locked(hw,
1234 	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
1235 	                                      kmrn_reg);
1236 	if (ret_val)
1237 		goto out;
1238 
1239 	usec_delay(20);
1240 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1241 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1242 
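	/*
	 * Briefly force the MAC speed configuration (with speed bypass set in
	 * CTRL_EXT), then restore the original CTRL/CTRL_EXT values below.
	 */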
1243 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1244 	reg |= E1000_CTRL_FRCSPD;
1245 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1246 
1247 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1248 	usec_delay(20);
1249 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1250 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1251 	usec_delay(20);
1252 
1253 out:
1254 	return ret_val;
1255 }
1256 
1257 /**
1258  *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
1259  *  @hw:       pointer to the HW structure
1260  *  @d0_state: boolean if entering d0 or d3 device state
1261  *
1262  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1263  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1264  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
1265  **/
1266 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1267 {
1268 	s32 ret_val = 0;
1269 	u32 mac_reg;
1270 	u16 oem_reg;
1271 
1272 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1273 
1274 	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1275 		return ret_val;
1276 
1277 	ret_val = hw->phy.ops.acquire(hw);
1278 	if (ret_val)
1279 		return ret_val;
1280 
1281 	if (hw->mac.type != e1000_pch2lan) {
1282 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1283 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1284 			goto out;
1285 	}
1286 
1287 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1288 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1289 		goto out;
1290 
1291 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1292 
1293 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1294 	if (ret_val)
1295 		goto out;
1296 
1297 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1298 
1299 	if (d0_state) {
1300 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1301 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1302 
1303 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1304 			oem_reg |= HV_OEM_BITS_LPLU;
1305 	} else {
1306 		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1307 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1308 
1309 		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1310 			oem_reg |= HV_OEM_BITS_LPLU;
1311 	}
1312 	/* Restart auto-neg to activate the bits */
1313 	if (!hw->phy.ops.check_reset_block(hw))
1314 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1315 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1316 
1317 out:
1318 	hw->phy.ops.release(hw);
1319 
1320 	return ret_val;
1321 }
1322 
1323 
1324 /**
1325  *  e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1326  *  @hw: pointer to the HW structure
1327  **/
1328 s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1329 {
1330 	DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");
1331 
1332 	if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1333 		return E1000_SUCCESS;
1334 
1335 	return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
1336 }
1337 
1338 /**
1339  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1340  *  @hw:   pointer to the HW structure
1341  **/
1342 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1343 {
1344 	s32 ret_val;
1345 	u16 data;
1346 
1347 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1348 
1349 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1350 	if (ret_val)
1351 		return ret_val;
1352 
1353 	data |= HV_KMRN_MDIO_SLOW;
1354 
1355 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1356 
1357 	return ret_val;
1358 }
1359 
1360 /**
1361  *  e1000_hv_phy_workarounds_ich8lan - PHY workarounds done after every PHY reset
1362  *  @hw: pointer to the HW structure
1363  **/
1364 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1365 {
1366 	s32 ret_val = E1000_SUCCESS;
1367 	u16 phy_data;
1368 
1369 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1370 
1371 	if (hw->mac.type != e1000_pchlan)
1372 		goto out;
1373 
1374 	/* Set MDIO slow mode before any other MDIO access */
1375 	if (hw->phy.type == e1000_phy_82577) {
1376 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
1377 		if (ret_val)
1378 			goto out;
1379 	}
1380 
1381 	/* Hanksville M Phy init for IEEE. */
1382 	if ((hw->revision_id == 2) &&
1383 	    (hw->phy.type == e1000_phy_82577) &&
1384 	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
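		/* Opaque PHY tuning values written as (reg 0x10, reg 0x11) pairs */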
1385 		hw->phy.ops.write_reg(hw, 0x10, 0x8823);
1386 		hw->phy.ops.write_reg(hw, 0x11, 0x0018);
1387 		hw->phy.ops.write_reg(hw, 0x10, 0x8824);
1388 		hw->phy.ops.write_reg(hw, 0x11, 0x0016);
1389 		hw->phy.ops.write_reg(hw, 0x10, 0x8825);
1390 		hw->phy.ops.write_reg(hw, 0x11, 0x001A);
1391 		hw->phy.ops.write_reg(hw, 0x10, 0x888C);
1392 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1393 		hw->phy.ops.write_reg(hw, 0x10, 0x888D);
1394 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1395 		hw->phy.ops.write_reg(hw, 0x10, 0x888E);
1396 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1397 		hw->phy.ops.write_reg(hw, 0x10, 0x8827);
1398 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1399 		hw->phy.ops.write_reg(hw, 0x10, 0x8835);
1400 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1401 		hw->phy.ops.write_reg(hw, 0x10, 0x8834);
1402 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1403 		hw->phy.ops.write_reg(hw, 0x10, 0x8833);
1404 		hw->phy.ops.write_reg(hw, 0x11, 0x0002);
1405 	}
1406 
1407 	if (((hw->phy.type == e1000_phy_82577) &&
1408 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1409 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1410 		/* Disable generation of early preamble */
1411 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1412 		if (ret_val)
1413 			goto out;
1414 
1415 		/* Preamble tuning for SSC */
1416 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1417 		if (ret_val)
1418 			goto out;
1419 	}
1420 
1421 	if (hw->phy.type == e1000_phy_82578) {
1422 		if (hw->revision_id < 3) {
1423 			/* PHY config */
1424 			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
1425 			                                0x66C0);
1426 			if (ret_val)
1427 				goto out;
1428 
1429 			/* PHY config */
1430 			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
1431 			                                0xFFFF);
1432 			if (ret_val)
1433 				goto out;
1434 		}
1435 
1436 		/*
1437 		 * Return registers to default by doing a soft reset then
1438 		 * writing 0x3140 to the control register.
1439 		 */
1440 		if (hw->phy.revision < 2) {
1441 			e1000_phy_sw_reset_generic(hw);
1442 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1443 			                                0x3140);
1444 		}
1445 	}
1446 
1447 	if ((hw->revision_id == 2) &&
1448 	    (hw->phy.type == e1000_phy_82577) &&
1449 	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1450 		/*
1451 		 * Workaround for OEM (GbE) not operating after reset -
1452 		 * restart AN (twice)
1453 		 */
1454 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1455 		if (ret_val)
1456 			goto out;
1457 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1458 		if (ret_val)
1459 			goto out;
1460 	}
1461 
1462 	/* Select page 0 */
1463 	ret_val = hw->phy.ops.acquire(hw);
1464 	if (ret_val)
1465 		goto out;
1466 
1467 	hw->phy.addr = 1;
1468 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1469 	hw->phy.ops.release(hw);
1470 	if (ret_val)
1471 		goto out;
1472 
1473 	/*
1474 	 * Configure the K1 Si workaround during phy reset assuming there is
1475 	 * link so that it disables K1 if link is in 1Gbps.
1476 	 */
1477 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1478 	if (ret_val)
1479 		goto out;
1480 
1481 	/* Workaround for link disconnects on a busy hub in half duplex */
1482 	ret_val = hw->phy.ops.acquire(hw);
1483 	if (ret_val)
1484 		goto out;
1485 	ret_val = hw->phy.ops.read_reg_locked(hw,
1486 	                                      PHY_REG(BM_PORT_CTRL_PAGE, 17),
1487 	                                      &phy_data);
1488 	if (ret_val)
1489 		goto release;
1490 	ret_val = hw->phy.ops.write_reg_locked(hw,
1491 	                                       PHY_REG(BM_PORT_CTRL_PAGE, 17),
1492 	                                       phy_data & 0x00FF);
1493 release:
1494 	hw->phy.ops.release(hw);
1495 out:
1496 	return ret_val;
1497 }
1498 
1499 /**
1500  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1501  *  @hw:   pointer to the HW structure
1502  **/
1503 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1504 {
1505 	u32 mac_reg;
1506 	u16 i;
1507 
1508 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1509 
1510 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1511 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1512 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1513 		hw->phy.ops.write_reg(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
1514 		hw->phy.ops.write_reg(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
1515 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1516 		hw->phy.ops.write_reg(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
1517 		hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
1518 	}
1519 }
1520 
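/**
 *  e1000_calc_rx_da_crc - Calculate CRC-32 of a receive (destination) address
 *  @mac: 6-byte MAC address
 *
 *  Computes the reflected IEEE 802.3 CRC-32 (polynomial 0xEDB88320) of the
 *  address; the complemented result is written to the PCH_RAICC registers by
 *  the 82579 jumbo frame workaround.
 **/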
1521 static u32 e1000_calc_rx_da_crc(u8 mac[])
1522 {
1523 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
1524 	u32 i, j, mask, crc;
1525 
1526 	DEBUGFUNC("e1000_calc_rx_da_crc");
1527 
1528 	crc = 0xffffffff;
1529 	for (i = 0; i < 6; i++) {
1530 		crc = crc ^ mac[i];
1531 		for (j = 8; j > 0; j--) {
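			/* mask is all ones when the CRC LSB is set, all zeros otherwise */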
1532 			mask = (crc & 1) * (-1);
1533 			crc = (crc >> 1) ^ (poly & mask);
1534 		}
1535 	}
1536 	return ~crc;
1537 }
1538 
1539 /**
1540  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1541  *  with 82579 PHY
1542  *  @hw: pointer to the HW structure
1543  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
1544  **/
1545 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1546 {
1547 	s32 ret_val = E1000_SUCCESS;
1548 	u16 phy_reg, data;
1549 	u32 mac_reg;
1550 	u16 i;
1551 
1552 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1553 
1554 	if (hw->mac.type != e1000_pch2lan)
1555 		goto out;
1556 
1557 	/* disable Rx path while enabling/disabling workaround */
1558 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1559 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1560 	if (ret_val)
1561 		goto out;
1562 
1563 	if (enable) {
1564 		/*
1565 		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1566 		 * SHRAL/H) and initial CRC values to the MAC
1567 		 */
1568 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1569 			u8 mac_addr[ETH_ADDR_LEN] = {0};
1570 			u32 addr_high, addr_low;
1571 
1572 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1573 			if (!(addr_high & E1000_RAH_AV))
1574 				continue;
1575 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1576 			mac_addr[0] = (addr_low & 0xFF);
1577 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1578 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1579 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1580 			mac_addr[4] = (addr_high & 0xFF);
1581 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1582 
1583 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1584 					e1000_calc_rx_da_crc(mac_addr));
1585 		}
1586 
1587 		/* Write Rx addresses to the PHY */
1588 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1589 
1590 		/* Enable jumbo frame workaround in the MAC */
1591 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1592 		mac_reg &= ~(1 << 14);
1593 		mac_reg |= (7 << 15);
1594 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1595 
1596 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1597 		mac_reg |= E1000_RCTL_SECRC;
1598 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1599 
1600 		ret_val = e1000_read_kmrn_reg_generic(hw,
1601 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1602 						&data);
1603 		if (ret_val)
1604 			goto out;
1605 		ret_val = e1000_write_kmrn_reg_generic(hw,
1606 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1607 						data | (1 << 0));
1608 		if (ret_val)
1609 			goto out;
1610 		ret_val = e1000_read_kmrn_reg_generic(hw,
1611 						E1000_KMRNCTRLSTA_HD_CTRL,
1612 						&data);
1613 		if (ret_val)
1614 			goto out;
1615 		data &= ~(0xF << 8);
1616 		data |= (0xB << 8);
1617 		ret_val = e1000_write_kmrn_reg_generic(hw,
1618 						E1000_KMRNCTRLSTA_HD_CTRL,
1619 						data);
1620 		if (ret_val)
1621 			goto out;
1622 
1623 		/* Enable jumbo frame workaround in the PHY */
1624 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1625 		data &= ~(0x7F << 5);
1626 		data |= (0x37 << 5);
1627 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1628 		if (ret_val)
1629 			goto out;
1630 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1631 		data &= ~(1 << 13);
1632 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1633 		if (ret_val)
1634 			goto out;
1635 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1636 		data &= ~(0x3FF << 2);
1637 		data |= (0x1A << 2);
1638 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1639 		if (ret_val)
1640 			goto out;
1641 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1642 		if (ret_val)
1643 			goto out;
1644 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1645 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
1646 		if (ret_val)
1647 			goto out;
1648 	} else {
1649 		/* Write MAC register values back to h/w defaults */
1650 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1651 		mac_reg &= ~(0xF << 14);
1652 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1653 
1654 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1655 		mac_reg &= ~E1000_RCTL_SECRC;
1656 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1657 
1658 		ret_val = e1000_read_kmrn_reg_generic(hw,
1659 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1660 						&data);
1661 		if (ret_val)
1662 			goto out;
1663 		ret_val = e1000_write_kmrn_reg_generic(hw,
1664 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1665 						data & ~(1 << 0));
1666 		if (ret_val)
1667 			goto out;
1668 		ret_val = e1000_read_kmrn_reg_generic(hw,
1669 						E1000_KMRNCTRLSTA_HD_CTRL,
1670 						&data);
1671 		if (ret_val)
1672 			goto out;
1673 		data &= ~(0xF << 8);
1674 		data |= (0xB << 8);
1675 		ret_val = e1000_write_kmrn_reg_generic(hw,
1676 						E1000_KMRNCTRLSTA_HD_CTRL,
1677 						data);
1678 		if (ret_val)
1679 			goto out;
1680 
1681 		/* Write PHY register values back to h/w defaults */
1682 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1683 		data &= ~(0x7F << 5);
1684 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1685 		if (ret_val)
1686 			goto out;
1687 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1688 		data |= (1 << 13);
1689 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1690 		if (ret_val)
1691 			goto out;
1692 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1693 		data &= ~(0x3FF << 2);
1694 		data |= (0x8 << 2);
1695 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1696 		if (ret_val)
1697 			goto out;
1698 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1699 		if (ret_val)
1700 			goto out;
1701 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1702 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
1703 		if (ret_val)
1704 			goto out;
1705 	}
1706 
1707 	/* re-enable Rx path after enabling/disabling workaround */
1708 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1709 
1710 out:
1711 	return ret_val;
1712 }
1713 
1714 /**
1715  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1716  *  performed after every PHY reset.
1717  **/
1718 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1719 {
1720 	s32 ret_val = E1000_SUCCESS;
1721 
1722 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1723 
1724 	if (hw->mac.type != e1000_pch2lan)
1725 		goto out;
1726 
1727 	/* Set MDIO slow mode before any other MDIO access */
1728 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
1729 
1730 out:
1731 	return ret_val;
1732 }
1733 
1734 /**
1735  *  e1000_k1_workaround_lv - K1 Si workaround
1736  *  @hw:   pointer to the HW structure
1737  *
1738  *  Workaround to set the K1 beacon duration for 82579 parts
1739  **/
1740 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1741 {
1742 	s32 ret_val = E1000_SUCCESS;
1743 	u16 status_reg = 0;
1744 	u32 mac_reg;
1745 
1746 	DEBUGFUNC("e1000_k1_workaround_lv");
1747 
1748 	if (hw->mac.type != e1000_pch2lan)
1749 		goto out;
1750 
1751 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
1752 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1753 	if (ret_val)
1754 		goto out;
1755 
1756 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1757 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1758 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1759 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1760 
1761 		if (status_reg & HV_M_STATUS_SPEED_1000)
1762 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1763 		else
1764 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1765 
1766 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1767 	}
1768 
1769 out:
1770 	return ret_val;
1771 }
1772 
1773 /**
1774  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1775  *  @hw:   pointer to the HW structure
1776  *  @gate: boolean set to TRUE to gate, FALSE to un-gate
1777  *
1778  *  Gate/ungate the automatic PHY configuration via hardware; perform
1779  *  the configuration via software instead.
1780  **/
1781 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1782 {
1783 	u32 extcnf_ctrl;
1784 
1785 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1786 
1787 	if (hw->mac.type != e1000_pch2lan)
1788 		return;
1789 
1790 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1791 
1792 	if (gate)
1793 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1794 	else
1795 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1796 
1797 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1798 	return;
1799 }
1800 
1801 /**
1802  *  e1000_hv_phy_tuning_workaround_ich8lan - A PHY tuning workaround
1803  *  needed for Nahum3 + Hanksville testing, requested by the HW team
1804  **/
1805 static s32 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
1806 {
1807 	s32 ret_val = E1000_SUCCESS;
1808 
1809 	DEBUGFUNC("e1000_hv_phy_tuning_workaround_ich8lan");
1810 
1811 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1812 	if (ret_val)
1813 		goto out;
1814 
1815 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1816 	if (ret_val)
1817 		goto out;
1818 
1819 	ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
1820 	if (ret_val)
1821 		goto out;
1822 
1823 	ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);
1824 
1825 out:
1826 	return ret_val;
1827 }
1828 
1829 /**
1830  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
1831  *  @hw: pointer to the HW structure
1832  *
1833  *  Check the appropriate indication the MAC has finished configuring the
1834  *  PHY after a software reset.
1835  **/
1836 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1837 {
1838 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1839 
1840 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
1841 
1842 	/* Wait for basic configuration to complete before proceeding */
1843 	do {
1844 		data = E1000_READ_REG(hw, E1000_STATUS);
1845 		data &= E1000_STATUS_LAN_INIT_DONE;
1846 		usec_delay(100);
1847 	} while ((!data) && --loop);
1848 
1849 	/*
1850 	 * If basic configuration is incomplete before the above loop
1851 	 * count reaches 0, loading the configuration from NVM will
1852 	 * leave the PHY in a bad state possibly resulting in no link.
1853 	 */
1854 	if (loop == 0)
1855 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1856 
1857 	/* Clear the Init Done bit for the next init event */
1858 	data = E1000_READ_REG(hw, E1000_STATUS);
1859 	data &= ~E1000_STATUS_LAN_INIT_DONE;
1860 	E1000_WRITE_REG(hw, E1000_STATUS, data);
1861 }
1862 
1863 /**
1864  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1865  *  @hw: pointer to the HW structure
1866  **/
1867 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1868 {
1869 	s32 ret_val = E1000_SUCCESS;
1870 	u16 reg;
1871 
1872 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1873 
1874 	if (hw->phy.ops.check_reset_block(hw))
1875 		goto out;
1876 
1877 	/* Allow time for h/w to get to quiescent state after reset */
1878 	msec_delay(10);
1879 
1880 	/* Perform any necessary post-reset workarounds */
1881 	switch (hw->mac.type) {
1882 	case e1000_pchlan:
1883 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1884 		if (ret_val)
1885 			goto out;
1886 		break;
1887 	case e1000_pch2lan:
1888 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1889 		if (ret_val)
1890 			goto out;
1891 		break;
1892 	default:
1893 		break;
1894 	}
1895 
1896 	if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
1897 		ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
1898 		if (ret_val)
1899 			goto out;
1900 	}
1901 
1902 	/* Dummy read to clear the phy wakeup bit after lcd reset */
1903 	if (hw->mac.type >= e1000_pchlan)
1904 		hw->phy.ops.read_reg(hw, BM_WUC, &reg);
1905 
1906 	/* Configure the LCD with the extended configuration region in NVM */
1907 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
1908 	if (ret_val)
1909 		goto out;
1910 
1911 	/* Configure the LCD with the OEM bits in NVM */
1912 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1913 
1914 	/* Ungate automatic PHY configuration on non-managed 82579 */
1915 	if ((hw->mac.type == e1000_pch2lan) &&
1916 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1917 		msec_delay(10);
1918 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1919 	}
1920 
1921 out:
1922 	return ret_val;
1923 }
1924 
1925 /**
1926  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1927  *  @hw: pointer to the HW structure
1928  *
1929  *  Resets the PHY
1930  *  This is a function pointer entry point called by drivers
1931  *  or other shared routines.
1932  **/
1933 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1934 {
1935 	s32 ret_val = E1000_SUCCESS;
1936 
1937 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1938 
1939 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
1940 	if ((hw->mac.type == e1000_pch2lan) &&
1941 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1942 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
1943 
1944 	ret_val = e1000_phy_hw_reset_generic(hw);
1945 	if (ret_val)
1946 		goto out;
1947 
1948 	ret_val = e1000_post_phy_reset_ich8lan(hw);
1949 
1950 out:
1951 	return ret_val;
1952 }
1953 
1954 /**
1955  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1956  *  @hw: pointer to the HW structure
1957  *  @active: TRUE to enable LPLU, FALSE to disable
1958  *
1959  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
1960  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
1961  *  not set the PHY speed. This function manually sets the LPLU bit and
1962  *  restarts auto-neg as the hardware would. D3 and D0 LPLU call the same function
1963  *  since it configures the same bit.
1964  **/
1965 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1966 {
1967 	s32 ret_val = E1000_SUCCESS;
1968 	u16 oem_reg;
1969 
1970 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
1971 
1972 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1973 	if (ret_val)
1974 		goto out;
1975 
1976 	if (active)
1977 		oem_reg |= HV_OEM_BITS_LPLU;
1978 	else
1979 		oem_reg &= ~HV_OEM_BITS_LPLU;
1980 
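	/* Restart auto-negotiation so the new LPLU setting takes effect */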
1981 	oem_reg |= HV_OEM_BITS_RESTART_AN;
1982 	ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1983 
1984 out:
1985 	return ret_val;
1986 }
1987 
1988 /**
1989  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1990  *  @hw: pointer to the HW structure
1991  *  @active: TRUE to enable LPLU, FALSE to disable
1992  *
1993  *  Sets the LPLU D0 state according to the active flag.  When
1994  *  activating LPLU this function also disables smart speed
1995  *  and vice versa.  LPLU will not be activated unless the
1996  *  device's autonegotiation advertisement is either 10, 10/100,
1997  *  or 10/100/1000 Mb/s at all duplexes.
1998  *  This is a function pointer entry point only called by
1999  *  PHY setup routines.
2000  **/
2001 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2002 {
2003 	struct e1000_phy_info *phy = &hw->phy;
2004 	u32 phy_ctrl;
2005 	s32 ret_val = E1000_SUCCESS;
2006 	u16 data;
2007 
2008 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2009 
2010 	if (phy->type == e1000_phy_ife)
2011 		goto out;
2012 
2013 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2014 
2015 	if (active) {
2016 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2017 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2018 
2019 		if (phy->type != e1000_phy_igp_3)
2020 			goto out;
2021 
2022 		/*
2023 		 * Call gig speed drop workaround on LPLU before accessing
2024 		 * any PHY registers
2025 		 */
2026 		if (hw->mac.type == e1000_ich8lan)
2027 			e1000_gig_downshift_workaround_ich8lan(hw);
2028 
2029 		/* When LPLU is enabled, we should disable SmartSpeed */
2030 		ret_val = phy->ops.read_reg(hw,
2031 		                            IGP01E1000_PHY_PORT_CONFIG,
2032 		                            &data);
2033 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2034 		ret_val = phy->ops.write_reg(hw,
2035 		                             IGP01E1000_PHY_PORT_CONFIG,
2036 		                             data);
2037 		if (ret_val)
2038 			goto out;
2039 	} else {
2040 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2041 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2042 
2043 		if (phy->type != e1000_phy_igp_3)
2044 			goto out;
2045 
2046 		/*
2047 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2048 		 * during Dx states where the power conservation is most
2049 		 * important.  During driver activity we should enable
2050 		 * SmartSpeed, so performance is maintained.
2051 		 */
2052 		if (phy->smart_speed == e1000_smart_speed_on) {
2053 			ret_val = phy->ops.read_reg(hw,
2054 			                            IGP01E1000_PHY_PORT_CONFIG,
2055 			                            &data);
2056 			if (ret_val)
2057 				goto out;
2058 
2059 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2060 			ret_val = phy->ops.write_reg(hw,
2061 			                             IGP01E1000_PHY_PORT_CONFIG,
2062 			                             data);
2063 			if (ret_val)
2064 				goto out;
2065 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2066 			ret_val = phy->ops.read_reg(hw,
2067 			                            IGP01E1000_PHY_PORT_CONFIG,
2068 			                            &data);
2069 			if (ret_val)
2070 				goto out;
2071 
2072 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2073 			ret_val = phy->ops.write_reg(hw,
2074 			                             IGP01E1000_PHY_PORT_CONFIG,
2075 			                             data);
2076 			if (ret_val)
2077 				goto out;
2078 		}
2079 	}
2080 
2081 out:
2082 	return ret_val;
2083 }
2084 
2085 /**
2086  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2087  *  @hw: pointer to the HW structure
2088  *  @active: TRUE to enable LPLU, FALSE to disable
2089  *
2090  *  Sets the LPLU D3 state according to the active flag.  When
2091  *  activating LPLU this function also disables smart speed
2092  *  and vice versa.  LPLU will not be activated unless the
2093  *  device's autonegotiation advertisement is either 10, 10/100,
2094  *  or 10/100/1000 Mb/s at all duplexes.
2095  *  This is a function pointer entry point only called by
2096  *  PHY setup routines.
2097  **/
2098 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2099 {
2100 	struct e1000_phy_info *phy = &hw->phy;
2101 	u32 phy_ctrl;
2102 	s32 ret_val = E1000_SUCCESS;
2103 	u16 data;
2104 
2105 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2106 
2107 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2108 
2109 	if (!active) {
2110 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2111 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2112 
2113 		if (phy->type != e1000_phy_igp_3)
2114 			goto out;
2115 
2116 		/*
2117 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2118 		 * during Dx states where the power conservation is most
2119 		 * important.  During driver activity we should enable
2120 		 * SmartSpeed, so performance is maintained.
2121 		 */
2122 		if (phy->smart_speed == e1000_smart_speed_on) {
2123 			ret_val = phy->ops.read_reg(hw,
2124 			                            IGP01E1000_PHY_PORT_CONFIG,
2125 			                            &data);
2126 			if (ret_val)
2127 				goto out;
2128 
2129 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2130 			ret_val = phy->ops.write_reg(hw,
2131 			                             IGP01E1000_PHY_PORT_CONFIG,
2132 			                             data);
2133 			if (ret_val)
2134 				goto out;
2135 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2136 			ret_val = phy->ops.read_reg(hw,
2137 			                            IGP01E1000_PHY_PORT_CONFIG,
2138 			                            &data);
2139 			if (ret_val)
2140 				goto out;
2141 
2142 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2143 			ret_val = phy->ops.write_reg(hw,
2144 			                             IGP01E1000_PHY_PORT_CONFIG,
2145 			                             data);
2146 			if (ret_val)
2147 				goto out;
2148 		}
2149 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2150 	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2151 	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2152 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2153 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2154 
2155 		if (phy->type != e1000_phy_igp_3)
2156 			goto out;
2157 
2158 		/*
2159 		 * Call gig speed drop workaround on LPLU before accessing
2160 		 * any PHY registers
2161 		 */
2162 		if (hw->mac.type == e1000_ich8lan)
2163 			e1000_gig_downshift_workaround_ich8lan(hw);
2164 
2165 		/* When LPLU is enabled, we should disable SmartSpeed */
2166 		ret_val = phy->ops.read_reg(hw,
2167 		                            IGP01E1000_PHY_PORT_CONFIG,
2168 		                            &data);
2169 		if (ret_val)
2170 			goto out;
2171 
2172 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2173 		ret_val = phy->ops.write_reg(hw,
2174 		                             IGP01E1000_PHY_PORT_CONFIG,
2175 		                             data);
2176 	}
2177 
2178 out:
2179 	return ret_val;
2180 }
2181 
2182 /**
2183  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2184  *  @hw: pointer to the HW structure
2185  *  @bank:  pointer to the variable that returns the active bank
2186  *
2187  *  Reads signature byte from the NVM using the flash access registers.
2188  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2189  **/
2190 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2191 {
2192 	u32 eecd;
2193 	struct e1000_nvm_info *nvm = &hw->nvm;
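	/*
	 * bank1_offset is the byte offset of the second flash bank (the bank
	 * size is kept in words); act_offset is the byte offset of the high
	 * byte of the signature word (0x13), which holds bits 15:14.
	 */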
2194 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2195 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2196 	u8 sig_byte = 0;
2197 	s32 ret_val = E1000_SUCCESS;
2198 
2199 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2200 
2201 	switch (hw->mac.type) {
2202 	case e1000_ich8lan:
2203 	case e1000_ich9lan:
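		/*
		 * On ICH8/ICH9 the EECD register can report the valid bank
		 * directly when its SEC1VAL indication is itself valid.
		 */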
2204 		eecd = E1000_READ_REG(hw, E1000_EECD);
2205 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2206 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2207 			if (eecd & E1000_EECD_SEC1VAL)
2208 				*bank = 1;
2209 			else
2210 				*bank = 0;
2211 
2212 			goto out;
2213 		}
2214 		DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2215 		         "reading flash signature\n");
2216 		/* fall-thru */
2217 	default:
2218 		/* set bank to 0 in case flash read fails */
2219 		*bank = 0;
2220 
2221 		/* Check bank 0 */
2222 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2223 		                                        &sig_byte);
2224 		if (ret_val)
2225 			goto out;
2226 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2227 		    E1000_ICH_NVM_SIG_VALUE) {
2228 			*bank = 0;
2229 			goto out;
2230 		}
2231 
2232 		/* Check bank 1 */
2233 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2234 		                                        bank1_offset,
2235 		                                        &sig_byte);
2236 		if (ret_val)
2237 			goto out;
2238 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2239 		    E1000_ICH_NVM_SIG_VALUE) {
2240 			*bank = 1;
2241 			goto out;
2242 		}
2243 
2244 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2245 		ret_val = -E1000_ERR_NVM;
2246 		break;
2247 	}
2248 out:
2249 	return ret_val;
2250 }
2251 
2252 /**
2253  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2254  *  @hw: pointer to the HW structure
2255  *  @offset: The offset (in words) of the word(s) to read.
2256  *  @words: Size of data to read in words
2257  *  @data: Pointer to the word(s) to read at offset.
2258  *
2259  *  Reads a word(s) from the NVM using the flash access registers.
2260  **/
2261 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2262                                   u16 *data)
2263 {
2264 	struct e1000_nvm_info *nvm = &hw->nvm;
2265 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2266 	u32 act_offset;
2267 	s32 ret_val = E1000_SUCCESS;
2268 	u32 bank = 0;
2269 	u16 i, word;
2270 
2271 	DEBUGFUNC("e1000_read_nvm_ich8lan");
2272 
2273 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2274 	    (words == 0)) {
2275 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2276 		ret_val = -E1000_ERR_NVM;
2277 		goto out;
2278 	}
2279 
2280 	nvm->ops.acquire(hw);
2281 
2282 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2283 	if (ret_val != E1000_SUCCESS) {
2284 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2285 		bank = 0;
2286 	}
2287 
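	/* Word offset of the start of the valid bank */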
2288 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2289 	act_offset += offset;
2290 
2291 	ret_val = E1000_SUCCESS;
2292 	for (i = 0; i < words; i++) {
2293 		if ((dev_spec->shadow_ram) &&
2294 		    (dev_spec->shadow_ram[offset+i].modified)) {
2295 			data[i] = dev_spec->shadow_ram[offset+i].value;
2296 		} else {
2297 			ret_val = e1000_read_flash_word_ich8lan(hw,
2298 			                                        act_offset + i,
2299 			                                        &word);
2300 			if (ret_val)
2301 				break;
2302 			data[i] = word;
2303 		}
2304 	}
2305 
2306 	nvm->ops.release(hw);
2307 
2308 out:
2309 	if (ret_val)
2310 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2311 
2312 	return ret_val;
2313 }
2314 
2315 /**
2316  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2317  *  @hw: pointer to the HW structure
2318  *
2319  *  This function does initial flash setup so that a new read/write/erase cycle
2320  *  can be started.
2321  **/
2322 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2323 {
2324 	union ich8_hws_flash_status hsfsts;
2325 	s32 ret_val = -E1000_ERR_NVM;
2326 	s32 i = 0;
2327 
2328 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2329 
2330 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2331 
2332 	/* Check if the flash descriptor is valid */
2333 	if (hsfsts.hsf_status.fldesvalid == 0) {
2334 		DEBUGOUT("Flash descriptor invalid.  "
2335 		         "SW Sequencing must be used.");
2336 		goto out;
2337 	}
2338 
2339 	/* Clear FCERR and DAEL in hw status by writing 1 */
2340 	hsfsts.hsf_status.flcerr = 1;
2341 	hsfsts.hsf_status.dael = 1;
2342 
2343 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2344 
2345 	/*
2346 	 * Either we should have a hardware SPI cycle-in-progress
2347 	 * bit to check against in order to start a new cycle, or the
2348 	 * FDONE bit should be changed in the hardware so that it
2349 	 * is 1 after a hardware reset, which can then be used as an
2350 	 * indication of whether a cycle is in progress or has been
2351 	 * completed.
2352 	 */
2353 
2354 	if (hsfsts.hsf_status.flcinprog == 0) {
2355 		/*
2356 		 * There is no cycle running at present,
2357 		 * so we can start a cycle.
2358 		 * Begin by setting Flash Cycle Done.
2359 		 */
2360 		hsfsts.hsf_status.flcdone = 1;
2361 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2362 		ret_val = E1000_SUCCESS;
2363 	} else {
2364 		/*
2365 		 * Otherwise, poll for some time so the current
2366 		 * cycle has a chance to end before giving up.
2367 		 */
2368 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2369 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2370 			                                      ICH_FLASH_HSFSTS);
2371 			if (hsfsts.hsf_status.flcinprog == 0) {
2372 				ret_val = E1000_SUCCESS;
2373 				break;
2374 			}
2375 			usec_delay(1);
2376 		}
2377 		if (ret_val == E1000_SUCCESS) {
2378 			/*
2379 			 * The previous cycle completed within the timeout,
2380 			 * so now set the Flash Cycle Done.
2381 			 */
2382 			hsfsts.hsf_status.flcdone = 1;
2383 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2384 			                        hsfsts.regval);
2385 		} else {
2386 			DEBUGOUT("Flash controller busy, cannot get access");
2387 		}
2388 	}
2389 
2390 out:
2391 	return ret_val;
2392 }
2393 
2394 /**
2395  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2396  *  @hw: pointer to the HW structure
2397  *  @timeout: maximum time to wait for completion
2398  *
2399  *  This function starts a flash cycle and waits for its completion.
2400  **/
2401 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2402 {
2403 	union ich8_hws_flash_ctrl hsflctl;
2404 	union ich8_hws_flash_status hsfsts;
2405 	s32 ret_val = -E1000_ERR_NVM;
2406 	u32 i = 0;
2407 
2408 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
2409 
2410 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2411 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2412 	hsflctl.hsf_ctrl.flcgo = 1;
2413 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2414 
2415 	/* wait till FDONE bit is set to 1 */
2416 	do {
2417 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2418 		if (hsfsts.hsf_status.flcdone == 1)
2419 			break;
2420 		usec_delay(1);
2421 	} while (i++ < timeout);
2422 
2423 	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2424 		ret_val = E1000_SUCCESS;
2425 
2426 	return ret_val;
2427 }
2428 
2429 /**
2430  *  e1000_read_flash_word_ich8lan - Read word from flash
2431  *  @hw: pointer to the HW structure
2432  *  @offset: offset to data location
2433  *  @data: pointer to the location for storing the data
2434  *
2435  *  Reads the flash word at offset into data.  Offset is converted
2436  *  to bytes before read.
2437  **/
2438 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2439                                          u16 *data)
2440 {
2441 	s32 ret_val;
2442 
2443 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
2444 
2445 	if (!data) {
2446 		ret_val = -E1000_ERR_NVM;
2447 		goto out;
2448 	}
2449 
2450 	/* Must convert offset into bytes. */
2451 	offset <<= 1;
2452 
2453 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2454 
2455 out:
2456 	return ret_val;
2457 }
2458 
2459 /**
2460  *  e1000_read_flash_byte_ich8lan - Read byte from flash
2461  *  @hw: pointer to the HW structure
2462  *  @offset: The offset of the byte to read.
2463  *  @data: Pointer to a byte to store the value read.
2464  *
2465  *  Reads a single byte from the NVM using the flash access registers.
2466  **/
2467 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2468                                          u8 *data)
2469 {
2470 	s32 ret_val = E1000_SUCCESS;
2471 	u16 word = 0;
2472 
2473 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2474 	if (ret_val)
2475 		goto out;
2476 
2477 	*data = (u8)word;
2478 
2479 out:
2480 	return ret_val;
2481 }
2482 
2483 /**
2484  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
2485  *  @hw: pointer to the HW structure
2486  *  @offset: The offset (in bytes) of the byte or word to read.
2487  *  @size: Size of data to read, 1=byte 2=word
2488  *  @data: Pointer to the word to store the value read.
2489  *
2490  *  Reads a byte or word from the NVM using the flash access registers.
2491  **/
2492 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2493                                          u8 size, u16 *data)
2494 {
2495 	union ich8_hws_flash_status hsfsts;
2496 	union ich8_hws_flash_ctrl hsflctl;
2497 	u32 flash_linear_addr;
2498 	u32 flash_data = 0;
2499 	s32 ret_val = -E1000_ERR_NVM;
2500 	u8 count = 0;
2501 
2502 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
2503 
2504 	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2505 		goto out;
2506 
2507 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2508 	                    hw->nvm.flash_base_addr;
2509 
2510 	do {
2511 		usec_delay(1);
2512 		/* Steps */
2513 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2514 		if (ret_val != E1000_SUCCESS)
2515 			break;
2516 
2517 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2518 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2519 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2520 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2521 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2522 
2523 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2524 
2525 		ret_val = e1000_flash_cycle_ich8lan(hw,
2526 		                                ICH_FLASH_READ_COMMAND_TIMEOUT);
2527 
2528 		/*
2529 		 * Check if FCERR is set to 1.  If so, clear it
2530 		 * and retry the whole sequence a few more times;
2531 		 * otherwise read in (shift in) the Flash Data0,
2532 		 * least significant byte first.
2533 		 */
2534 		if (ret_val == E1000_SUCCESS) {
2535 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2536 			if (size == 1)
2537 				*data = (u8)(flash_data & 0x000000FF);
2538 			else if (size == 2)
2539 				*data = (u16)(flash_data & 0x0000FFFF);
2540 			break;
2541 		} else {
2542 			/*
2543 			 * If we've gotten here, then things are probably
2544 			 * completely hosed, but if the error condition is
2545 			 * detected, it won't hurt to give it another try...
2546 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2547 			 */
2548 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2549 			                                      ICH_FLASH_HSFSTS);
2550 			if (hsfsts.hsf_status.flcerr == 1) {
2551 				/* Repeat for some time before giving up. */
2552 				continue;
2553 			} else if (hsfsts.hsf_status.flcdone == 0) {
2554 				DEBUGOUT("Timeout error - flash cycle "
2555 				         "did not complete.");
2556 				break;
2557 			}
2558 		}
2559 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2560 
2561 out:
2562 	return ret_val;
2563 }
2564 
2565 /**
2566  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
2567  *  @hw: pointer to the HW structure
2568  *  @offset: The offset (in words) of the word(s) to write.
2569  *  @words: Size of data to write in words
2570  *  @data: Pointer to the word(s) to write at offset.
2571  *
2572  *  Writes the word(s) to the shadow RAM, to be committed to the NVM later.
2573  **/
2574 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2575                                    u16 *data)
2576 {
2577 	struct e1000_nvm_info *nvm = &hw->nvm;
2578 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2579 	s32 ret_val = E1000_SUCCESS;
2580 	u16 i;
2581 
2582 	DEBUGFUNC("e1000_write_nvm_ich8lan");
2583 
2584 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2585 	    (words == 0)) {
2586 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2587 		ret_val = -E1000_ERR_NVM;
2588 		goto out;
2589 	}
2590 
2591 	nvm->ops.acquire(hw);
2592 
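	/*
	 * Stage the new values in the shadow RAM; they are committed to the
	 * flash by e1000_update_nvm_checksum_ich8lan().
	 */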
2593 	for (i = 0; i < words; i++) {
2594 		dev_spec->shadow_ram[offset+i].modified = TRUE;
2595 		dev_spec->shadow_ram[offset+i].value = data[i];
2596 	}
2597 
2598 	nvm->ops.release(hw);
2599 
2600 out:
2601 	return ret_val;
2602 }
2603 
2604 /**
2605  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2606  *  @hw: pointer to the HW structure
2607  *
2608  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
2609  *  which writes the checksum to the shadow ram.  The changes in the shadow
2610  *  ram are then committed to the EEPROM by processing each bank at a time,
2611  *  checking for the modified bit and writing only the pending changes.
2612  *  After a successful commit, the shadow ram is cleared and is ready for
2613  *  future writes.
2614  **/
2615 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2616 {
2617 	struct e1000_nvm_info *nvm = &hw->nvm;
2618 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2619 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2620 	s32 ret_val;
2621 	u16 data;
2622 
2623 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2624 
2625 	ret_val = e1000_update_nvm_checksum_generic(hw);
2626 	if (ret_val)
2627 		goto out;
2628 
2629 	if (nvm->type != e1000_nvm_flash_sw)
2630 		goto out;
2631 
2632 	nvm->ops.acquire(hw);
2633 
2634 	/*
2635 	 * We're writing to the opposite bank so if we're on bank 1,
2636 	 * write to bank 0 etc.  We also need to erase the segment that
2637 	 * is going to be written
2638 	 */
2639 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2640 	if (ret_val != E1000_SUCCESS) {
2641 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2642 		bank = 0;
2643 	}
2644 
2645 	if (bank == 0) {
2646 		new_bank_offset = nvm->flash_bank_size;
2647 		old_bank_offset = 0;
2648 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2649 		if (ret_val)
2650 			goto release;
2651 	} else {
2652 		old_bank_offset = nvm->flash_bank_size;
2653 		new_bank_offset = 0;
2654 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2655 		if (ret_val)
2656 			goto release;
2657 	}
2658 
2659 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2660 		/*
2661 		 * Determine whether to write the value stored
2662 		 * in the other NVM bank or a modified value stored
2663 		 * in the shadow RAM
2664 		 */
2665 		if (dev_spec->shadow_ram[i].modified) {
2666 			data = dev_spec->shadow_ram[i].value;
2667 		} else {
2668 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
2669 			                                        old_bank_offset,
2670 			                                        &data);
2671 			if (ret_val)
2672 				break;
2673 		}
2674 
2675 		/*
2676 		 * If this is the signature word (0x13), make sure the signature bits
2677 		 * (15:14) are 11b until the commit has completed.
2678 		 * This will allow us to write 10b which indicates the
2679 		 * signature is valid.  We want to do this after the write
2680 		 * has completed so that we don't mark the segment valid
2681 		 * while the write is still in progress
2682 		 */
2683 		if (i == E1000_ICH_NVM_SIG_WORD)
2684 			data |= E1000_ICH_NVM_SIG_MASK;
2685 
2686 		/* Convert offset to bytes. */
2687 		act_offset = (i + new_bank_offset) << 1;
2688 
2689 		usec_delay(100);
2690 		/* Write the bytes to the new bank. */
2691 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2692 		                                               act_offset,
2693 		                                               (u8)data);
2694 		if (ret_val)
2695 			break;
2696 
2697 		usec_delay(100);
2698 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2699 		                                          act_offset + 1,
2700 		                                          (u8)(data >> 8));
2701 		if (ret_val)
2702 			break;
2703 	}
2704 
2705 	/*
2706 	 * Don't bother writing the segment valid bits if sector
2707 	 * programming failed.
2708 	 */
2709 	if (ret_val) {
2710 		DEBUGOUT("Flash commit failed.\n");
2711 		goto release;
2712 	}
2713 
2714 	/*
2715 	 * Finally validate the new segment by setting bits 15:14
2716 	 * to 10b in word 0x13; this can be done without an
2717 	 * erase as well since these bits are 11b to start with
2718 	 * and we only need to change bit 14 to 0b
2719 	 */
2720 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2721 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2722 	if (ret_val)
2723 		goto release;
2724 
2725 	data &= 0xBFFF;
2726 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2727 	                                               act_offset * 2 + 1,
2728 	                                               (u8)(data >> 8));
2729 	if (ret_val)
2730 		goto release;
2731 
2732 	/*
2733 	 * And invalidate the previously valid segment by setting
2734 	 * its signature word (0x13) high_byte to 0b. This can be
2735 	 * done without an erase because flash erase sets all bits
2736 	 * to 1's. We can write 1's to 0's without an erase
2737 	 */
2738 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2739 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2740 	if (ret_val)
2741 		goto release;
2742 
2743 	/* Great!  Everything worked, we can now clear the cached entries. */
2744 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2745 		dev_spec->shadow_ram[i].modified = FALSE;
2746 		dev_spec->shadow_ram[i].value = 0xFFFF;
2747 	}
2748 
2749 release:
2750 	nvm->ops.release(hw);
2751 
2752 	/*
2753 	 * Reload the EEPROM, or else modifications will not appear
2754 	 * until after the next adapter reset.
2755 	 */
2756 	if (!ret_val) {
2757 		nvm->ops.reload(hw);
2758 		msec_delay(10);
2759 	}
2760 
2761 out:
2762 	if (ret_val)
2763 		DEBUGOUT1("NVM update error: %d\n", ret_val);
2764 
2765 	return ret_val;
2766 }
2767 
2768 /**
2769  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2770  *  @hw: pointer to the HW structure
2771  *
2772  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2773  *  If the bit is 0, the EEPROM had been modified but the checksum was not
2774  *  calculated, in which case we need to calculate the checksum and set bit 6.
2775  **/
2776 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2777 {
2778 	s32 ret_val = E1000_SUCCESS;
2779 	u16 data;
2780 
2781 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2782 
2783 	/*
2784 	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
2785 	 * needs to be fixed.  This bit is an indication that the NVM
2786 	 * was prepared by OEM software and did not calculate the
2787 	 * checksum...a likely scenario.
2788 	 */
2789 	ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2790 	if (ret_val)
2791 		goto out;
2792 
2793 	if ((data & 0x40) == 0) {
2794 		data |= 0x40;
2795 		ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2796 		if (ret_val)
2797 			goto out;
2798 		ret_val = hw->nvm.ops.update(hw);
2799 		if (ret_val)
2800 			goto out;
2801 	}
2802 
2803 	ret_val = e1000_validate_nvm_checksum_generic(hw);
2804 
2805 out:
2806 	return ret_val;
2807 }
2808 
2809 /**
2810  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2811  *  @hw: pointer to the HW structure
2812  *  @offset: The offset (in bytes) of the byte/word to write.
2813  *  @size: Size of data to write, 1=byte 2=word
2814  *  @data: The byte(s) to write to the NVM.
2815  *
2816  *  Writes one/two bytes to the NVM using the flash access registers.
2817  **/
2818 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2819                                           u8 size, u16 data)
2820 {
2821 	union ich8_hws_flash_status hsfsts;
2822 	union ich8_hws_flash_ctrl hsflctl;
2823 	u32 flash_linear_addr;
2824 	u32 flash_data = 0;
2825 	s32 ret_val = -E1000_ERR_NVM;
2826 	u8 count = 0;
2827 
2828 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
2829 
2830 	if (size < 1 || size > 2 || data > size * 0xff ||
2831 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
2832 		goto out;
2833 
2834 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2835 	                    hw->nvm.flash_base_addr;
2836 
2837 	do {
2838 		usec_delay(1);
2839 		/* Steps */
2840 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2841 		if (ret_val != E1000_SUCCESS)
2842 			break;
2843 
2844 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2845 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2846 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2847 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2848 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2849 
2850 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2851 
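		/* Load the byte/word to be programmed into FDATA0 */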
2852 		if (size == 1)
2853 			flash_data = (u32)data & 0x00FF;
2854 		else
2855 			flash_data = (u32)data;
2856 
2857 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2858 
2859 		/*
2860 		 * Check if FCERR is set to 1.  If so, clear it and
2861 		 * retry the whole sequence a few more times, else done
2862 		 */
2863 		ret_val = e1000_flash_cycle_ich8lan(hw,
2864 		                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2865 		if (ret_val == E1000_SUCCESS)
2866 			break;
2867 
2868 		/*
2869 		 * If we're here, then things are most likely
2870 		 * completely hosed, but if the error condition
2871 		 * is detected, it won't hurt to give it another
2872 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2873 		 */
2874 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2875 		if (hsfsts.hsf_status.flcerr == 1)
2876 			/* Repeat for some time before giving up. */
2877 			continue;
2878 		if (hsfsts.hsf_status.flcdone == 0) {
2879 			DEBUGOUT("Timeout error - flash cycle "
2880 				 "did not complete.");
2881 			break;
2882 		}
2883 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2884 
2885 out:
2886 	return ret_val;
2887 }
2888 
2889 /**
2890  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2891  *  @hw: pointer to the HW structure
2892  *  @offset: The offset of the byte to write.
2893  *  @data: The byte to write to the NVM.
2894  *
2895  *  Writes a single byte to the NVM using the flash access registers.
2896  **/
2897 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2898                                           u8 data)
2899 {
2900 	u16 word = (u16)data;
2901 
2902 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2903 
2904 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2905 }
2906 
2907 /**
2908  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2909  *  @hw: pointer to the HW structure
2910  *  @offset: The offset of the byte to write.
2911  *  @byte: The byte to write to the NVM.
2912  *
2913  *  Writes a single byte to the NVM using the flash access registers.
2914  *  Goes through a retry algorithm before giving up.
2915  **/
2916 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2917                                                 u32 offset, u8 byte)
2918 {
2919 	s32 ret_val;
2920 	u16 program_retries;
2921 
2922 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2923 
2924 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2925 	if (ret_val == E1000_SUCCESS)
2926 		goto out;
2927 
2928 	for (program_retries = 0; program_retries < 100; program_retries++) {
2929 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2930 		usec_delay(100);
2931 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2932 		if (ret_val == E1000_SUCCESS)
2933 			break;
2934 	}
2935 	if (program_retries == 100) {
2936 		ret_val = -E1000_ERR_NVM;
2937 		goto out;
2938 	}
2939 
2940 out:
2941 	return ret_val;
2942 }
2943 
2944 /**
2945  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2946  *  @hw: pointer to the HW structure
2947  *  @bank: 0 for first bank, 1 for second bank, etc.
2948  *
2949  *  Erases the bank specified.  Banks are 0 based; bank N starts at
2950  *  flash_base_addr + N * flash_bank_size (in bytes).
2951  **/
2952 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2953 {
2954 	struct e1000_nvm_info *nvm = &hw->nvm;
2955 	union ich8_hws_flash_status hsfsts;
2956 	union ich8_hws_flash_ctrl hsflctl;
2957 	u32 flash_linear_addr;
2958 	/* bank size is in 16bit words - adjust to bytes */
2959 	u32 flash_bank_size = nvm->flash_bank_size * 2;
2960 	s32 ret_val = E1000_SUCCESS;
2961 	s32 count = 0;
2962 	s32 j, iteration, sector_size;
2963 
2964 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2965 
2966 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2967 
2968 	/*
2969 	 * Determine HW Sector size: Read BERASE bits of hw flash status
2970 	 * register
2971 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2972 	 *     consecutive sectors.  The start index for the nth Hw sector
2973 	 *     can be calculated as = bank * 4096 + n * 256
2974 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2975 	 *     The start index for the nth Hw sector can be calculated
2976 	 *     as = bank * 4096
2977 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2978 	 *     (ich9 only, otherwise error condition)
2979 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2980 	 */
2981 	switch (hsfsts.hsf_status.berasesz) {
2982 	case 0:
2983 		/* Hw sector size 256 */
2984 		sector_size = ICH_FLASH_SEG_SIZE_256;
2985 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2986 		break;
2987 	case 1:
2988 		sector_size = ICH_FLASH_SEG_SIZE_4K;
2989 		iteration = 1;
2990 		break;
2991 	case 2:
2992 		sector_size = ICH_FLASH_SEG_SIZE_8K;
2993 		iteration = 1;
2994 		break;
2995 	case 3:
2996 		sector_size = ICH_FLASH_SEG_SIZE_64K;
2997 		iteration = 1;
2998 		break;
2999 	default:
3000 		ret_val = -E1000_ERR_NVM;
3001 		goto out;
3002 	}
3003 
3004 	/* Start with the base address, then add the sector offset. */
3005 	flash_linear_addr = hw->nvm.flash_base_addr;
3006 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3007 
3008 	for (j = 0; j < iteration ; j++) {
3009 		do {
3010 			/* Steps */
3011 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3012 			if (ret_val)
3013 				goto out;
3014 
3015 			/*
3016 			 * Write a value 11 (block Erase) in Flash
3017 			 * Cycle field in hw flash control
3018 			 */
3019 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3020 			                                      ICH_FLASH_HSFCTL);
3021 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3022 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3023 			                        hsflctl.regval);
3024 
3025 			/*
3026 			 * Write the last 24 bits of an index within the
3027 			 * block into Flash Linear address field in Flash
3028 			 * Address.
3029 			 */
3030 			flash_linear_addr += (j * sector_size);
3031 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3032 			                      flash_linear_addr);
3033 
3034 			ret_val = e1000_flash_cycle_ich8lan(hw,
3035 			                       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3036 			if (ret_val == E1000_SUCCESS)
3037 				break;
3038 
3039 			/*
3040 			 * Check if FCERR is set to 1.  If 1,
3041 			 * clear it and try the whole sequence
3042 			 * a few more times, else done
3043 			 */
3044 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3045 						      ICH_FLASH_HSFSTS);
3046 			if (hsfsts.hsf_status.flcerr == 1)
3047 				/* repeat for some time before giving up */
3048 				continue;
3049 			else if (hsfsts.hsf_status.flcdone == 0)
3050 				goto out;
3051 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3052 	}
3053 
3054 out:
3055 	return ret_val;
3056 }
3057 
3058 /**
3059  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3060  *  @hw: pointer to the HW structure
3061  *  @data: Pointer to the LED settings
3062  *
3063  *  Reads the LED default settings from the NVM into data.  If the NVM LED
3064  *  setting is all 0's or F's, set the LED default to a valid LED default
3065  *  setting.
3066  **/
3067 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3068 {
3069 	s32 ret_val;
3070 
3071 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3072 
3073 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3074 	if (ret_val) {
3075 		DEBUGOUT("NVM Read Error\n");
3076 		goto out;
3077 	}
3078 
3079 	if (*data == ID_LED_RESERVED_0000 ||
3080 	    *data == ID_LED_RESERVED_FFFF)
3081 		*data = ID_LED_DEFAULT_ICH8LAN;
3082 
3083 out:
3084 	return ret_val;
3085 }
3086 
3087 /**
3088  *  e1000_id_led_init_pchlan - store LED configurations
3089  *  @hw: pointer to the HW structure
3090  *
3091  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3092  *  the PHY LED configuration register.
3093  *
3094  *  PCH also does not have an "always on" or "always off" mode which
3095  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3096  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3097  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3098  *  link based on logic in e1000_led_[on|off]_pchlan().
3099  **/
3100 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3101 {
3102 	struct e1000_mac_info *mac = &hw->mac;
3103 	s32 ret_val;
3104 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3105 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3106 	u16 data, i, temp, shift;
3107 
3108 	DEBUGFUNC("e1000_id_led_init_pchlan");
3109 
3110 	/* Get default ID LED modes */
3111 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3112 	if (ret_val)
3113 		goto out;
3114 
3115 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3116 	mac->ledctl_mode1 = mac->ledctl_default;
3117 	mac->ledctl_mode2 = mac->ledctl_default;
3118 
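	/*
	 * Each of the four LEDs has a 4-bit mode field in the NVM setting
	 * word and a 5-bit field in the PHY LED configuration value.
	 */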
3119 	for (i = 0; i < 4; i++) {
3120 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3121 		shift = (i * 5);
3122 		switch (temp) {
3123 		case ID_LED_ON1_DEF2:
3124 		case ID_LED_ON1_ON2:
3125 		case ID_LED_ON1_OFF2:
3126 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3127 			mac->ledctl_mode1 |= (ledctl_on << shift);
3128 			break;
3129 		case ID_LED_OFF1_DEF2:
3130 		case ID_LED_OFF1_ON2:
3131 		case ID_LED_OFF1_OFF2:
3132 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3133 			mac->ledctl_mode1 |= (ledctl_off << shift);
3134 			break;
3135 		default:
3136 			/* Do nothing */
3137 			break;
3138 		}
3139 		switch (temp) {
3140 		case ID_LED_DEF1_ON2:
3141 		case ID_LED_ON1_ON2:
3142 		case ID_LED_OFF1_ON2:
3143 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3144 			mac->ledctl_mode2 |= (ledctl_on << shift);
3145 			break;
3146 		case ID_LED_DEF1_OFF2:
3147 		case ID_LED_ON1_OFF2:
3148 		case ID_LED_OFF1_OFF2:
3149 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3150 			mac->ledctl_mode2 |= (ledctl_off << shift);
3151 			break;
3152 		default:
3153 			/* Do nothing */
3154 			break;
3155 		}
3156 	}
3157 
3158 out:
3159 	return ret_val;
3160 }
3161 
3162 /**
3163  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3164  *  @hw: pointer to the HW structure
3165  *
3166  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3167  *  register, so the bus width is hard coded.
3168  **/
3169 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3170 {
3171 	struct e1000_bus_info *bus = &hw->bus;
3172 	s32 ret_val;
3173 
3174 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3175 
3176 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3177 
3178 	/*
3179 	 * ICH devices are "PCI Express"-ish.  They have
3180 	 * a configuration space, but do not contain
3181 	 * PCI Express Capability registers, so bus width
3182 	 * must be hardcoded.
3183 	 */
3184 	if (bus->width == e1000_bus_width_unknown)
3185 		bus->width = e1000_bus_width_pcie_x1;
3186 
3187 	return ret_val;
3188 }
3189 
3190 /**
3191  *  e1000_reset_hw_ich8lan - Reset the hardware
3192  *  @hw: pointer to the HW structure
3193  *
3194  *  Does a full reset of the hardware which includes a reset of the PHY and
3195  *  MAC.
3196  **/
3197 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3198 {
3199 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3200 	u16 reg;
3201 	u32 ctrl, icr, kab;
3202 	s32 ret_val;
3203 
3204 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3205 
3206 	/*
3207 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
3208 	 * on the last TLP read/write transaction when MAC is reset.
3209 	 */
3210 	ret_val = e1000_disable_pcie_master_generic(hw);
3211 	if (ret_val)
3212 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3213 
3214 	DEBUGOUT("Masking off all interrupts\n");
3215 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3216 
3217 	/*
3218 	 * Disable the Transmit and Receive units.  Then delay to allow
3219 	 * any pending transactions to complete before we hit the MAC
3220 	 * with the global reset.
3221 	 */
3222 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3223 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3224 	E1000_WRITE_FLUSH(hw);
3225 
3226 	msec_delay(10);
3227 
3228 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3229 	if (hw->mac.type == e1000_ich8lan) {
3230 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3231 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3232 		/* Set Packet Buffer Size to 16k. */
3233 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3234 	}
3235 
3236 	if (hw->mac.type == e1000_pchlan) {
3237 		/* Save the NVM K1 bit setting */
3238 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3239 		if (ret_val)
3240 			return ret_val;
3241 
3242 		if (reg & E1000_NVM_K1_ENABLE)
3243 			dev_spec->nvm_k1_enabled = TRUE;
3244 		else
3245 			dev_spec->nvm_k1_enabled = FALSE;
3246 	}
3247 
3248 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3249 
3250 	if (!hw->phy.ops.check_reset_block(hw)) {
3251 		/*
3252 		 * Full-chip reset requires MAC and PHY reset at the same
3253 		 * time to make sure the interface between MAC and the
3254 		 * external PHY is reset.
3255 		 */
3256 		ctrl |= E1000_CTRL_PHY_RST;
3257 
3258 		/*
3259 		 * Gate automatic PHY configuration by hardware on
3260 		 * non-managed 82579
3261 		 */
3262 		if ((hw->mac.type == e1000_pch2lan) &&
3263 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3264 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3265 	}
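	/*
	 * Hold the software flag, if it can be acquired, while the global
	 * reset is issued; it is released below once the reset completes.
	 */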
3266 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3267 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3268 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3269 	msec_delay(20);
3270 
3271 	if (!ret_val)
3272 		e1000_release_swflag_ich8lan(hw);
3273 
3274 	if (ctrl & E1000_CTRL_PHY_RST) {
3275 		ret_val = hw->phy.ops.get_cfg_done(hw);
3276 		if (ret_val)
3277 			goto out;
3278 
3279 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3280 		if (ret_val)
3281 			goto out;
3282 	}
3283 
3284 	/*
3285 	 * For PCH, this write will make sure that any noise
3286 	 * will be detected as a CRC error and be dropped rather than show up
3287 	 * as a bad packet to the DMA engine.
3288 	 */
3289 	if (hw->mac.type == e1000_pchlan)
3290 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3291 
3292 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3293 	icr = E1000_READ_REG(hw, E1000_ICR);
3294 
3295 	kab = E1000_READ_REG(hw, E1000_KABGTXD);
3296 	kab |= E1000_KABGTXD_BGSQLBIAS;
3297 	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3298 
3299 out:
3300 	return ret_val;
3301 }
3302 
3303 /**
3304  *  e1000_init_hw_ich8lan - Initialize the hardware
3305  *  @hw: pointer to the HW structure
3306  *
3307  *  Prepares the hardware for transmit and receive by doing the following:
3308  *   - initialize hardware bits
3309  *   - initialize LED identification
3310  *   - setup receive address registers
3311  *   - setup flow control
3312  *   - setup transmit descriptors
3313  *   - clear statistics
3314  **/
3315 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3316 {
3317 	struct e1000_mac_info *mac = &hw->mac;
3318 	u32 ctrl_ext, txdctl, snoop;
3319 	s32 ret_val;
3320 	u16 i;
3321 
3322 	DEBUGFUNC("e1000_init_hw_ich8lan");
3323 
3324 	e1000_initialize_hw_bits_ich8lan(hw);
3325 
3326 	/* Initialize identification LED */
3327 	ret_val = mac->ops.id_led_init(hw);
3328 	if (ret_val)
3329 		DEBUGOUT("Error initializing identification LED\n");
3330 		/* This is not fatal and we should not stop init due to this */
3331 
3332 	/* Setup the receive address. */
3333 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3334 
3335 	/* Zero out the Multicast HASH table */
3336 	DEBUGOUT("Zeroing the MTA\n");
3337 	for (i = 0; i < mac->mta_reg_count; i++)
3338 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3339 
3340 	/*
3341 	 * The 82578 Rx buffer will stall if wakeup is enabled in the host and
3342 	 * the ME.  Reading the BM_WUC register will clear the host wakeup bit.
3343 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3344 	 */
3345 	if (hw->phy.type == e1000_phy_82578) {
3346 		hw->phy.ops.read_reg(hw, BM_WUC, &i);
3347 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3348 		if (ret_val)
3349 			return ret_val;
3350 	}
3351 
3352 	/* Setup link and flow control */
3353 	ret_val = mac->ops.setup_link(hw);
3354 
3355 	/* Set the transmit descriptor write-back policy for both queues */
3356 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3357 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3358 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3359 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3360 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3361 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3362 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3363 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3364 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3365 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3366 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3367 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3368 
3369 	/*
3370 	 * ICH8 has opposite polarity of no_snoop bits.
3371 	 * By default, we should use snoop behavior.
3372 	 */
3373 	if (mac->type == e1000_ich8lan)
3374 		snoop = PCIE_ICH8_SNOOP_ALL;
3375 	else
3376 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3377 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3378 
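	/* Disable PCIe relaxed ordering */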
3379 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3380 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3381 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3382 
3383 	/*
3384 	 * Clear all of the statistics registers (clear on read).  It is
3385 	 * important that we do this after we have tried to establish link
3386 	 * because the symbol error count will increment wildly if there
3387 	 * is no link.
3388 	 */
3389 	e1000_clear_hw_cntrs_ich8lan(hw);
3390 
3391 	return ret_val;
3392 }
3393 /**
3394  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3395  *  @hw: pointer to the HW structure
3396  *
3397  *  Sets/Clears required hardware bits necessary for correctly setting up the
3398  *  hardware for transmit and receive.
3399  **/
3400 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3401 {
3402 	u32 reg;
3403 
3404 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3405 
3406 	/* Extended Device Control */
3407 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3408 	reg |= (1 << 22);
3409 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3410 	if (hw->mac.type >= e1000_pchlan)
3411 		reg |= E1000_CTRL_EXT_PHYPDEN;
3412 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3413 
3414 	/* Transmit Descriptor Control 0 */
3415 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3416 	reg |= (1 << 22);
3417 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3418 
3419 	/* Transmit Descriptor Control 1 */
3420 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3421 	reg |= (1 << 22);
3422 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3423 
3424 	/* Transmit Arbitration Control 0 */
3425 	reg = E1000_READ_REG(hw, E1000_TARC(0));
3426 	if (hw->mac.type == e1000_ich8lan)
3427 		reg |= (1 << 28) | (1 << 29);
3428 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3429 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3430 
3431 	/* Transmit Arbitration Control 1 */
3432 	reg = E1000_READ_REG(hw, E1000_TARC(1));
3433 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3434 		reg &= ~(1 << 28);
3435 	else
3436 		reg |= (1 << 28);
3437 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
3438 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3439 
3440 	/* Device Status */
3441 	if (hw->mac.type == e1000_ich8lan) {
3442 		reg = E1000_READ_REG(hw, E1000_STATUS);
3443 		reg &= ~(1U << 31);
3444 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
3445 	}
3446 
3447 	/*
3448 	 * Work around a descriptor data corruption issue seen with NFSv2 UDP
3449 	 * traffic by disabling the NFS filtering capability.
3450 	 */
3451 	reg = E1000_READ_REG(hw, E1000_RFCTL);
3452 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3453 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3454 
3455 	return;
3456 }
3457 
3458 /**
3459  *  e1000_setup_link_ich8lan - Setup flow control and link settings
3460  *  @hw: pointer to the HW structure
3461  *
3462  *  Determines which flow control settings to use, then configures flow
3463  *  control.  Calls the appropriate media-specific link configuration
3464  *  function.  Assuming the adapter has a valid link partner, a valid link
3465  *  should be established.  Assumes the hardware has previously been reset
3466  *  and the transmitter and receiver are not enabled.
3467  **/
3468 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3469 {
3470 	s32 ret_val = E1000_SUCCESS;
3471 
3472 	DEBUGFUNC("e1000_setup_link_ich8lan");
3473 
3474 	if (hw->phy.ops.check_reset_block(hw))
3475 		goto out;
3476 
3477 	/*
3478 	 * ICH parts do not have a word in the NVM to determine
3479 	 * the default flow control setting, so we explicitly
3480 	 * set it to full.
3481 	 */
3482 	if (hw->fc.requested_mode == e1000_fc_default)
3483 		hw->fc.requested_mode = e1000_fc_full;
3484 
3485 	/*
3486 	 * Save off the requested flow control mode for use later.  Depending
3487 	 * on the link partner's capabilities, we may or may not use this mode.
3488 	 */
3489 	hw->fc.current_mode = hw->fc.requested_mode;
3490 
3491 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3492 		hw->fc.current_mode);
3493 
3494 	/* Continue to configure the copper link. */
3495 	ret_val = hw->mac.ops.setup_physical_interface(hw);
3496 	if (ret_val)
3497 		goto out;
3498 
3499 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3500 	if ((hw->phy.type == e1000_phy_82578) ||
3501 	    (hw->phy.type == e1000_phy_82579) ||
3502 	    (hw->phy.type == e1000_phy_82577)) {
3503 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3504 
3505 		ret_val = hw->phy.ops.write_reg(hw,
3506 		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
3507 		                             hw->fc.pause_time);
3508 		if (ret_val)
3509 			goto out;
3510 	}
3511 
3512 	ret_val = e1000_set_fc_watermarks_generic(hw);
3513 
3514 out:
3515 	return ret_val;
3516 }
3517 
3518 /**
3519  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3520  *  @hw: pointer to the HW structure
3521  *
3522  *  Configures the Kumeran interface to the PHY to wait the appropriate time
3523  *  when polling the PHY, then calls the generic setup_copper_link to finish
3524  *  configuring the copper link.
3525  **/
3526 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3527 {
3528 	u32 ctrl;
3529 	s32 ret_val;
3530 	u16 reg_data;
3531 
3532 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3533 
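	/* Set the link up and do not force speed or duplex */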
3534 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3535 	ctrl |= E1000_CTRL_SLU;
3536 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3537 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3538 
3539 	/*
3540 	 * Set the MAC to wait the maximum time between each iteration
3541 	 * and increase the max iterations when polling the PHY;
3542 	 * this fixes erroneous timeouts at 10 Mb/s.
3543 	 */
3544 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3545 	                                       0xFFFF);
3546 	if (ret_val)
3547 		goto out;
3548 	ret_val = e1000_read_kmrn_reg_generic(hw,
3549 	                                      E1000_KMRNCTRLSTA_INBAND_PARAM,
3550 	                                      &reg_data);
3551 	if (ret_val)
3552 		goto out;
3553 	reg_data |= 0x3F;
3554 	ret_val = e1000_write_kmrn_reg_generic(hw,
3555 	                                       E1000_KMRNCTRLSTA_INBAND_PARAM,
3556 	                                       reg_data);
3557 	if (ret_val)
3558 		goto out;
3559 
3560 	switch (hw->phy.type) {
3561 	case e1000_phy_igp_3:
3562 		ret_val = e1000_copper_link_setup_igp(hw);
3563 		if (ret_val)
3564 			goto out;
3565 		break;
3566 	case e1000_phy_bm:
3567 	case e1000_phy_82578:
3568 		ret_val = e1000_copper_link_setup_m88(hw);
3569 		if (ret_val)
3570 			goto out;
3571 		break;
3572 	case e1000_phy_82577:
3573 	case e1000_phy_82579:
3574 		ret_val = e1000_copper_link_setup_82577(hw);
3575 		if (ret_val)
3576 			goto out;
3577 		break;
3578 	case e1000_phy_ife:
3579 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3580 		                               &reg_data);
3581 		if (ret_val)
3582 			goto out;
3583 
3584 		reg_data &= ~IFE_PMC_AUTO_MDIX;
3585 
3586 		switch (hw->phy.mdix) {
3587 		case 1:		/* force MDI */
3588 			reg_data &= ~IFE_PMC_FORCE_MDIX;
3589 			break;
3590 		case 2:		/* force MDI-X */
3591 			reg_data |= IFE_PMC_FORCE_MDIX;
3592 			break;
3593 		case 0:		/* auto-crossover */
3594 		default:
3595 			reg_data |= IFE_PMC_AUTO_MDIX;
3596 			break;
3597 		}
3598 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3599 		                                reg_data);
3600 		if (ret_val)
3601 			goto out;
3602 		break;
3603 	default:
3604 		break;
3605 	}
3606 	ret_val = e1000_setup_copper_link_generic(hw);
3607 
3608 out:
3609 	return ret_val;
3610 }
3611 
3612 /**
3613  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3614  *  @hw: pointer to the HW structure
3615  *  @speed: pointer to store current link speed
3616  *  @duplex: pointer to store the current link duplex
3617  *
3618  *  Calls the generic get_speed_and_duplex to retrieve the current link
3619  *  information and then calls the Kumeran lock loss workaround for links at
3620  *  gigabit speeds.
3621  **/
3622 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3623                                           u16 *duplex)
3624 {
3625 	s32 ret_val;
3626 
3627 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3628 
3629 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3630 	if (ret_val)
3631 		goto out;
3632 
3633 	if ((hw->mac.type == e1000_ich8lan) &&
3634 	    (hw->phy.type == e1000_phy_igp_3) &&
3635 	    (*speed == SPEED_1000)) {
3636 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3637 	}
3638 
3639 out:
3640 	return ret_val;
3641 }
3642 
3643 /**
3644  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3645  *  @hw: pointer to the HW structure
3646  *
3647  *  Work-around for 82566 Kumeran PCS lock loss:
3648  *  On a link status change (e.g. PCI reset, speed change), when link is up
3649  *  at gigabit speed:
3650  *    0) if workaround is optionally disabled do nothing
3651  *    1) wait 1ms for Kumeran link to come up
3652  *    2) check Kumeran Diagnostic register PCS lock loss bit
3653  *    3) if not set the link is locked (all is good), otherwise...
3654  *    4) reset the PHY
3655  *    5) repeat up to 10 times
3656  *  Note: this is only called for IGP3 copper when speed is 1gb.
3657  **/
3658 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3659 {
3660 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3661 	u32 phy_ctrl;
3662 	s32 ret_val = E1000_SUCCESS;
3663 	u16 i, data;
3664 	bool link;
3665 
3666 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3667 
3668 	if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
3669 		goto out;
3670 
3671 	/*
3672 	 * Make sure link is up before proceeding; if not, just return.
3673 	 * Attempting this while the link is negotiating has been seen to
3674 	 * foul up link stability.
3675 	 */
3676 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3677 	if (!link) {
3678 		ret_val = E1000_SUCCESS;
3679 		goto out;
3680 	}
3681 
3682 	for (i = 0; i < 10; i++) {
3683 		/* read once to clear */
3684 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3685 		if (ret_val)
3686 			goto out;
3687 		/* and again to get new status */
3688 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3689 		if (ret_val)
3690 			goto out;
3691 
3692 		/* check for PCS lock */
3693 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3694 			ret_val = E1000_SUCCESS;
3695 			goto out;
3696 		}
3697 
3698 		/* Issue PHY reset */
3699 		hw->phy.ops.reset(hw);
3700 		msec_delay_irq(5);
3701 	}
3702 	/* Disable GigE link negotiation */
3703 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3704 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3705 	             E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3706 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3707 
3708 	/*
3709 	 * Call gig speed drop workaround on Gig disable before accessing
3710 	 * any PHY registers
3711 	 */
3712 	e1000_gig_downshift_workaround_ich8lan(hw);
3713 
3714 	/* unable to acquire PCS lock */
3715 	ret_val = -E1000_ERR_PHY;
3716 
3717 out:
3718 	return ret_val;
3719 }
3720 
3721 /**
3722  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3723  *  @hw: pointer to the HW structure
3724  *  @state: boolean value used to set the current Kumeran workaround state
3725  *
3726  *  If ICH8, set the current Kumeran lock loss workaround state
3727  *  (enabled - TRUE / disabled - FALSE).
3728  **/
3729 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3730                                                  bool state)
3731 {
3732 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3733 
3734 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3735 
3736 	if (hw->mac.type != e1000_ich8lan) {
3737 		DEBUGOUT("Workaround applies to ICH8 only.\n");
3738 		return;
3739 	}
3740 
3741 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3742 
3743 	return;
3744 }
3745 
3746 /**
3747  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3748  *  @hw: pointer to the HW structure
3749  *
3750  *  Workaround for 82566 power-down on D3 entry:
3751  *    1) disable gigabit link
3752  *    2) write VR power-down enable
3753  *    3) read it back
3754  *  If the read-back succeeds, continue; otherwise issue an LCD reset and repeat.
3755  **/
3756 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3757 {
3758 	u32 reg;
3759 	u16 data;
3760 	u8  retry = 0;
3761 
3762 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3763 
3764 	if (hw->phy.type != e1000_phy_igp_3)
3765 		goto out;
3766 
3767 	/* Try the workaround twice (if needed) */
3768 	do {
3769 		/* Disable link */
3770 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3771 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3772 		        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3773 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3774 
3775 		/*
3776 		 * Call gig speed drop workaround on Gig disable before
3777 		 * accessing any PHY registers
3778 		 */
3779 		if (hw->mac.type == e1000_ich8lan)
3780 			e1000_gig_downshift_workaround_ich8lan(hw);
3781 
3782 		/* Write VR power-down enable */
3783 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3784 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3785 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3786 		                   data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3787 
3788 		/* Read it back and test */
3789 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3790 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3791 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3792 			break;
3793 
3794 		/* Issue PHY reset and repeat at most one more time */
3795 		reg = E1000_READ_REG(hw, E1000_CTRL);
3796 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3797 		retry++;
3798 	} while (retry);
3799 
3800 out:
3801 	return;
3802 }
3803 
3804 /**
3805  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3806  *  @hw: pointer to the HW structure
3807  *
3808  *  Steps to take when dropping from 1 Gb/s (e.g. link cable removal (LSC),
3809  *  LPLU, Gig disable, MDIC PHY reset):
3810  *    1) Set Kumeran Near-end loopback
3811  *    2) Clear Kumeran Near-end loopback
3812  *  Should only be called for ICH8[m] devices with IGP_3 Phy.
3813  **/
3814 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3815 {
3816 	s32 ret_val = E1000_SUCCESS;
3817 	u16 reg_data;
3818 
3819 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3820 
3821 	if ((hw->mac.type != e1000_ich8lan) ||
3822 	    (hw->phy.type != e1000_phy_igp_3))
3823 		goto out;
3824 
3825 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3826 	                                      &reg_data);
3827 	if (ret_val)
3828 		goto out;
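	/* Pulse the Kumeran near-end loopback bit: set it, then clear it */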
3829 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3830 	ret_val = e1000_write_kmrn_reg_generic(hw,
3831 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3832 	                                       reg_data);
3833 	if (ret_val)
3834 		goto out;
3835 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3836 	ret_val = e1000_write_kmrn_reg_generic(hw,
3837 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3838 	                                       reg_data);
3839 out:
3840 	return;
3841 }
3842 
3843 /**
3844  *  e1000_disable_gig_wol_ich8lan - disable gig during WoL
3845  *  @hw: pointer to the HW structure
3846  *
3847  *  During S0 to Sx transition, it is possible the link remains at gig
3848  *  instead of negotiating to a lower speed.  Before going to Sx, set
3849  *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3850  *  to a lower speed.
3851  *
3852  *  Should only be called for applicable parts.
3853  **/
3854 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3855 {
3856 	u32 phy_ctrl;
3857 	s32 ret_val;
3858 
3859 	DEBUGFUNC("e1000_disable_gig_wol_ich8lan");
3860 
3861 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3862 	phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3863 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3864 
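	/*
	 * On PCH and newer parts, also configure the OEM (LPLU/Gig disable)
	 * bits in the PHY and program the PHY's SMBus address.
	 */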
3865 	if (hw->mac.type >= e1000_pchlan) {
3866 		e1000_oem_bits_config_ich8lan(hw, FALSE);
3867 		ret_val = hw->phy.ops.acquire(hw);
3868 		if (ret_val)
3869 			return;
3870 		e1000_write_smbus_addr(hw);
3871 		hw->phy.ops.release(hw);
3872 	}
3873 
3874 	return;
3875 }
3876 
3877 /**
3878  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
3879  *  @hw: pointer to the HW structure
3880  *
3881  *  Return the LED back to the default configuration.
3882  **/
3883 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3884 {
3885 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
3886 
3887 	if (hw->phy.type == e1000_phy_ife)
3888 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3889 		                             0);
3890 
3891 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3892 	return E1000_SUCCESS;
3893 }
3894 
3895 /**
3896  *  e1000_led_on_ich8lan - Turn LEDs on
3897  *  @hw: pointer to the HW structure
3898  *
3899  *  Turn on the LEDs.
3900  **/
3901 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3902 {
3903 	DEBUGFUNC("e1000_led_on_ich8lan");
3904 
3905 	if (hw->phy.type == e1000_phy_ife)
3906 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3907 		                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3908 
3909 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3910 	return E1000_SUCCESS;
3911 }
3912 
3913 /**
3914  *  e1000_led_off_ich8lan - Turn LEDs off
3915  *  @hw: pointer to the HW structure
3916  *
3917  *  Turn off the LEDs.
3918  **/
3919 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3920 {
3921 	DEBUGFUNC("e1000_led_off_ich8lan");
3922 
3923 	if (hw->phy.type == e1000_phy_ife)
3924 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3925 		               (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3926 
3927 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3928 	return E1000_SUCCESS;
3929 }
3930 
3931 /**
3932  *  e1000_setup_led_pchlan - Configures SW controllable LED
3933  *  @hw: pointer to the HW structure
3934  *
3935  *  This prepares the SW controllable LED for use.
3936  **/
3937 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3938 {
3939 	DEBUGFUNC("e1000_setup_led_pchlan");
3940 
3941 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3942 					(u16)hw->mac.ledctl_mode1);
3943 }
3944 
3945 /**
3946  *  e1000_cleanup_led_pchlan - Restore the default LED operation
3947  *  @hw: pointer to the HW structure
3948  *
3949  *  Return the LED back to the default configuration.
3950  **/
3951 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3952 {
3953 	DEBUGFUNC("e1000_cleanup_led_pchlan");
3954 
3955 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3956 					(u16)hw->mac.ledctl_default);
3957 }
3958 
3959 /**
3960  *  e1000_led_on_pchlan - Turn LEDs on
3961  *  @hw: pointer to the HW structure
3962  *
3963  *  Turn on the LEDs.
3964  **/
3965 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3966 {
3967 	u16 data = (u16)hw->mac.ledctl_mode2;
3968 	u32 i, led;
3969 
3970 	DEBUGFUNC("e1000_led_on_pchlan");
3971 
3972 	/*
3973 	 * If there is no link, turn the LED on by setting the invert bit
3974 	 * for each LED whose mode is "link_up" in ledctl_mode2.
3975 	 */
3976 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3977 		for (i = 0; i < 3; i++) {
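			/*
			 * Each LED is configured by a 5-bit field; the IVRT
			 * bit within that field inverts the LED output.
			 */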
3978 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3979 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
3980 			    E1000_LEDCTL_MODE_LINK_UP)
3981 				continue;
3982 			if (led & E1000_PHY_LED0_IVRT)
3983 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3984 			else
3985 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
3986 		}
3987 	}
3988 
3989 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3990 }
3991 
3992 /**
3993  *  e1000_led_off_pchlan - Turn LEDs off
3994  *  @hw: pointer to the HW structure
3995  *
3996  *  Turn off the LEDs.
3997  **/
3998 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3999 {
4000 	u16 data = (u16)hw->mac.ledctl_mode1;
4001 	u32 i, led;
4002 
4003 	DEBUGFUNC("e1000_led_off_pchlan");
4004 
4005 	/*
4006 	 * If there is no link, turn the LED off by clearing the invert bit
4007 	 * for each LED whose mode is "link_up" in ledctl_mode1.
4008 	 */
4009 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4010 		for (i = 0; i < 3; i++) {
4011 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4012 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4013 			    E1000_LEDCTL_MODE_LINK_UP)
4014 				continue;
4015 			if (led & E1000_PHY_LED0_IVRT)
4016 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4017 			else
4018 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4019 		}
4020 	}
4021 
4022 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4023 }
4024 
4025 /**
4026  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4027  *  @hw: pointer to the HW structure
4028  *
4029  *  Read appropriate register for the config done bit for completion status
4030  *  and configure the PHY through s/w for EEPROM-less parts.
4031  *
4032  *  NOTE: some EEPROM-less silicon will fail trying to read the config done
4033  *  bit, so only an error is logged and initialization continues.  If we
4034  *  were to return with an error, EEPROM-less silicon would not be able to
4035  *  be reset or to change link.
4036  **/
4037 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4038 {
4039 	s32 ret_val = E1000_SUCCESS;
4040 	u32 bank = 0;
4041 	u32 status;
4042 
4043 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4044 
4045 	e1000_get_cfg_done_generic(hw);
4046 
4047 	/* Wait for indication from h/w that it has completed basic config */
4048 	if (hw->mac.type >= e1000_ich10lan) {
4049 		e1000_lan_init_done_ich8lan(hw);
4050 	} else {
4051 		ret_val = e1000_get_auto_rd_done_generic(hw);
4052 		if (ret_val) {
4053 			/*
4054 			 * When auto config read does not complete, do not
4055 			 * return with an error. This can happen in situations
4056 			 * where there is no eeprom and prevents getting link.
4057 			 */
4058 			DEBUGOUT("Auto Read Done did not complete\n");
4059 			ret_val = E1000_SUCCESS;
4060 		}
4061 	}
4062 
4063 	/* Clear PHY Reset Asserted bit */
4064 	status = E1000_READ_REG(hw, E1000_STATUS);
4065 	if (status & E1000_STATUS_PHYRA)
4066 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4067 	else
4068 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4069 
4070 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4071 	if (hw->mac.type <= e1000_ich9lan) {
4072 		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4073 		    (hw->phy.type == e1000_phy_igp_3)) {
4074 			e1000_phy_init_script_igp3(hw);
4075 		}
4076 	} else {
4077 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4078 			/* Maybe we should do a basic PHY config */
4079 			DEBUGOUT("EEPROM not present\n");
4080 			ret_val = -E1000_ERR_CONFIG;
4081 		}
4082 	}
4083 
4084 	return ret_val;
4085 }
4086 
4087 /**
4088  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4089  * @hw: pointer to the HW structure
4090  *
4091  * In the case of a PHY power-down to save power, to turn off the link during
4092  * a driver unload, or when wake on LAN is not enabled, remove the link.
4093  **/
4094 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4095 {
4096 	/* Power down the PHY unless manageability or a PHY reset block is active */
4097 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4098 	      hw->phy.ops.check_reset_block(hw)))
4099 		e1000_power_down_phy_copper(hw);
4100 
4101 	return;
4102 }
4103 
4104 /**
4105  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4106  *  @hw: pointer to the HW structure
4107  *
4108  *  Clears hardware counters specific to the silicon family and calls
4109  *  clear_hw_cntrs_generic to clear all general purpose counters.
4110  **/
4111 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4112 {
4113 	u16 phy_data;
4114 
4115 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4116 
4117 	e1000_clear_hw_cntrs_base_generic(hw);
4118 
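	/* Read the remaining family-specific statistics registers to clear them */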
4119 	E1000_READ_REG(hw, E1000_ALGNERRC);
4120 	E1000_READ_REG(hw, E1000_RXERRC);
4121 	E1000_READ_REG(hw, E1000_TNCRS);
4122 	E1000_READ_REG(hw, E1000_CEXTERR);
4123 	E1000_READ_REG(hw, E1000_TSCTC);
4124 	E1000_READ_REG(hw, E1000_TSCTFC);
4125 
4126 	E1000_READ_REG(hw, E1000_MGTPRC);
4127 	E1000_READ_REG(hw, E1000_MGTPDC);
4128 	E1000_READ_REG(hw, E1000_MGTPTC);
4129 
4130 	E1000_READ_REG(hw, E1000_IAC);
4131 	E1000_READ_REG(hw, E1000_ICRXOC);
4132 
4133 	/* Clear PHY statistics registers */
4134 	if ((hw->phy.type == e1000_phy_82578) ||
4135 	    (hw->phy.type == e1000_phy_82579) ||
4136 	    (hw->phy.type == e1000_phy_82577)) {
4137 		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
4138 		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
4139 		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
4140 		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
4141 		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
4142 		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
4143 		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
4144 		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
4145 		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
4146 		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
4147 		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
4148 		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
4149 		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
4150 		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
4151 	}
4152 }
4153 
4154