xref: /freebsd/sys/dev/e1000/e1000_ich8lan.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 /*
36  * 82562G 10/100 Network Connection
37  * 82562G-2 10/100 Network Connection
38  * 82562GT 10/100 Network Connection
39  * 82562GT-2 10/100 Network Connection
40  * 82562V 10/100 Network Connection
41  * 82562V-2 10/100 Network Connection
42  * 82566DC-2 Gigabit Network Connection
43  * 82566DC Gigabit Network Connection
44  * 82566DM-2 Gigabit Network Connection
45  * 82566DM Gigabit Network Connection
46  * 82566MC Gigabit Network Connection
47  * 82566MM Gigabit Network Connection
48  * 82567LM Gigabit Network Connection
49  * 82567LF Gigabit Network Connection
50  * 82567V Gigabit Network Connection
51  * 82567LM-2 Gigabit Network Connection
52  * 82567LF-2 Gigabit Network Connection
53  * 82567V-2 Gigabit Network Connection
54  * 82567LF-3 Gigabit Network Connection
55  * 82567LM-3 Gigabit Network Connection
56  * 82567LM-4 Gigabit Network Connection
57  * 82577LM Gigabit Network Connection
58  * 82577LC Gigabit Network Connection
59  * 82578DM Gigabit Network Connection
60  * 82578DC Gigabit Network Connection
61  */
62 
63 #include "e1000_api.h"
64 
65 static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
66 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
67 static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
68 static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
69 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
70 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
71 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
73 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
74 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
75 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
76 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
77 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
78                                             bool active);
79 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
80                                             bool active);
81 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
82                                    u16 words, u16 *data);
83 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
84                                     u16 words, u16 *data);
85 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
86 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
87 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
88                                             u16 *data);
89 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
90 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
91 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
92 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
93 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
94 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
95 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
96                                            u16 *speed, u16 *duplex);
97 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
98 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
99 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
101 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
102 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
103 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
104 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
105 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
107 static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
108 static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
109 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
111 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
112                                           u32 offset, u8 *data);
113 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
114                                           u8 size, u16 *data);
115 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
116                                           u32 offset, u16 *data);
117 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
118                                                  u32 offset, u8 byte);
119 static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
120                                            u32 offset, u8 data);
121 static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
122                                            u8 size, u16 data);
123 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
124 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
125 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
126 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
127 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
128 
129 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
130 /* Offset 04h HSFSTS */
131 union ich8_hws_flash_status {
132 	struct ich8_hsfsts {
133 		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
134 		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
135 		u16 dael       :1; /* bit 2 Direct Access error Log */
136 		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
137 		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
138 		u16 reserved1  :2; /* bit 7:6 Reserved */
139 		u16 reserved2  :6; /* bit 13:8 Reserved */
140 		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
141 		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
142 	} hsf_status;
143 	u16 regval;
144 };
145 
146 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
147 /* Offset 06h FLCTL */
148 union ich8_hws_flash_ctrl {
149 	struct ich8_hsflctl {
150 		u16 flcgo      :1;   /* 0 Flash Cycle Go */
151 		u16 flcycle    :2;   /* 2:1 Flash Cycle */
152 		u16 reserved   :5;   /* 7:3 Reserved  */
153 		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
154 		u16 flockdn    :6;   /* 15:10 Reserved */
155 	} hsf_ctrl;
156 	u16 regval;
157 };
158 
159 /* ICH Flash Region Access Permissions */
160 union ich8_hws_flash_regacc {
161 	struct ich8_flracc {
162 		u32 grra      :8; /* 0:7 GbE region Read Access */
163 		u32 grwa      :8; /* 8:15 GbE region Write Access */
164 		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
165 		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
166 	} hsf_flregacc;
167 	u16 regval;
168 };
169 
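/*
 * Illustrative sketch (editorial addition, not part of the shared code):
 * how the status union above is typically used.  The 16-bit HSFSTS register
 * is read into regval and individual fields are then inspected through the
 * bit-field view.  The helper name below is hypothetical; the register
 * accessor and offset are the ones used elsewhere in this file.
 */
#if 0
static bool example_flash_cycle_done(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;

	/* Read the whole register, then test the Flash Cycle Done bit */
	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
	return hsfsts.hsf_status.flcdone == 1;
}
#endif
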
170 /**
171  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
172  *  @hw: pointer to the HW structure
173  *
174  *  Initialize family-specific PHY parameters and function pointers.
175  **/
176 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
177 {
178 	struct e1000_phy_info *phy = &hw->phy;
179 	s32 ret_val = E1000_SUCCESS;
180 
181 	DEBUGFUNC("e1000_init_phy_params_pchlan");
182 
183 	phy->addr                     = 1;
184 	phy->reset_delay_us           = 100;
185 
186 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
187 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
188 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
189 	phy->ops.read_reg             = e1000_read_phy_reg_hv;
190 	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
191 	phy->ops.release              = e1000_release_swflag_ich8lan;
192 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
193 	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
194 	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
195 	phy->ops.write_reg            = e1000_write_phy_reg_hv;
196 	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
197 	phy->ops.power_up             = e1000_power_up_phy_copper;
198 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
199 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
200 
201 	phy->id = e1000_phy_unknown;
202 	e1000_get_phy_id(hw);
203 	phy->type = e1000_get_phy_type_from_id(phy->id);
204 
205 	switch (phy->type) {
206 	case e1000_phy_82577:
207 		phy->ops.check_polarity = e1000_check_polarity_82577;
208 		phy->ops.force_speed_duplex =
209 			e1000_phy_force_speed_duplex_82577;
210 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
211 		phy->ops.get_info = e1000_get_phy_info_82577;
212 		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
213 	case e1000_phy_82578:
214 		phy->ops.check_polarity = e1000_check_polarity_m88;
215 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
216 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
217 		phy->ops.get_info = e1000_get_phy_info_m88;
218 		break;
219 	default:
220 		ret_val = -E1000_ERR_PHY;
221 		break;
222 	}
223 
224 	return ret_val;
225 }
226 
227 /**
228  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
229  *  @hw: pointer to the HW structure
230  *
231  *  Initialize family-specific PHY parameters and function pointers.
232  **/
233 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
234 {
235 	struct e1000_phy_info *phy = &hw->phy;
236 	s32 ret_val = E1000_SUCCESS;
237 	u16 i = 0;
238 
239 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
240 
241 	phy->addr                     = 1;
242 	phy->reset_delay_us           = 100;
243 
244 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
245 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
246 	phy->ops.get_cable_length     = e1000_get_cable_length_igp_2;
247 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
248 	phy->ops.read_reg             = e1000_read_phy_reg_igp;
249 	phy->ops.release              = e1000_release_swflag_ich8lan;
250 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
251 	phy->ops.set_d0_lplu_state    = e1000_set_d0_lplu_state_ich8lan;
252 	phy->ops.set_d3_lplu_state    = e1000_set_d3_lplu_state_ich8lan;
253 	phy->ops.write_reg            = e1000_write_phy_reg_igp;
254 	phy->ops.power_up             = e1000_power_up_phy_copper;
255 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
256 
257 	/*
258 	 * We may need to do this twice - once for IGP and if that fails,
259 	 * we'll set BM func pointers and try again
260 	 */
261 	ret_val = e1000_determine_phy_address(hw);
262 	if (ret_val) {
263 		phy->ops.write_reg = e1000_write_phy_reg_bm;
264 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
265 		ret_val = e1000_determine_phy_address(hw);
266 		if (ret_val) {
267 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
268 			goto out;
269 		}
270 	}
271 
272 	phy->id = 0;
273 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
274 	       (i++ < 100)) {
275 		msec_delay(1);
276 		ret_val = e1000_get_phy_id(hw);
277 		if (ret_val)
278 			goto out;
279 	}
280 
281 	/* Verify phy id */
282 	switch (phy->id) {
283 	case IGP03E1000_E_PHY_ID:
284 		phy->type = e1000_phy_igp_3;
285 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
286 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
287 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
288 		phy->ops.get_info = e1000_get_phy_info_igp;
289 		phy->ops.check_polarity = e1000_check_polarity_igp;
290 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
291 		break;
292 	case IFE_E_PHY_ID:
293 	case IFE_PLUS_E_PHY_ID:
294 	case IFE_C_E_PHY_ID:
295 		phy->type = e1000_phy_ife;
296 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
297 		phy->ops.get_info = e1000_get_phy_info_ife;
298 		phy->ops.check_polarity = e1000_check_polarity_ife;
299 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
300 		break;
301 	case BME1000_E_PHY_ID:
302 		phy->type = e1000_phy_bm;
303 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
304 		phy->ops.read_reg = e1000_read_phy_reg_bm;
305 		phy->ops.write_reg = e1000_write_phy_reg_bm;
306 		phy->ops.commit = e1000_phy_sw_reset_generic;
307 		phy->ops.get_info = e1000_get_phy_info_m88;
308 		phy->ops.check_polarity = e1000_check_polarity_m88;
309 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
310 		break;
311 	default:
312 		ret_val = -E1000_ERR_PHY;
313 		goto out;
314 	}
315 
316 out:
317 	return ret_val;
318 }
319 
320 /**
321  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
322  *  @hw: pointer to the HW structure
323  *
324  *  Initialize family-specific NVM parameters and function
325  *  pointers.
326  **/
327 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
328 {
329 	struct e1000_nvm_info *nvm = &hw->nvm;
330 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
331 	u32 gfpreg, sector_base_addr, sector_end_addr;
332 	s32 ret_val = E1000_SUCCESS;
333 	u16 i;
334 
335 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
336 
337 	/* Can't read flash registers if the register set isn't mapped. */
338 	if (!hw->flash_address) {
339 		DEBUGOUT("ERROR: Flash registers not mapped\n");
340 		ret_val = -E1000_ERR_CONFIG;
341 		goto out;
342 	}
343 
344 	nvm->type = e1000_nvm_flash_sw;
345 
346 	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
347 
348 	/*
349 	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
350 	 * Add 1 to sector_end_addr since this sector is included in
351 	 * the overall size.
352 	 */
353 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
354 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
355 
356 	/* flash_base_addr is byte-aligned */
357 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
358 
359 	/*
360 	 * find total size of the NVM, then cut in half since the total
361 	 * size represents two separate NVM banks.
362 	 */
363 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
364 	                          << FLASH_SECTOR_ADDR_SHIFT;
365 	nvm->flash_bank_size /= 2;
366 	/* Adjust to word count */
367 	nvm->flash_bank_size /= sizeof(u16);
368 
369 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
370 
371 	/* Clear shadow ram */
372 	for (i = 0; i < nvm->word_size; i++) {
373 		dev_spec->shadow_ram[i].modified = FALSE;
374 		dev_spec->shadow_ram[i].value    = 0xFFFF;
375 	}
376 
377 	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
378 	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
379 
380 	/* Function Pointers */
381 	nvm->ops.acquire       = e1000_acquire_nvm_ich8lan;
382 	nvm->ops.release       = e1000_release_nvm_ich8lan;
383 	nvm->ops.read          = e1000_read_nvm_ich8lan;
384 	nvm->ops.update        = e1000_update_nvm_checksum_ich8lan;
385 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
386 	nvm->ops.validate      = e1000_validate_nvm_checksum_ich8lan;
387 	nvm->ops.write         = e1000_write_nvm_ich8lan;
388 
389 out:
390 	return ret_val;
391 }
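
/*
 * Worked example (editorial addition) of the bank-size arithmetic above,
 * assuming a hypothetical GFPREG value of 0x001F0001 and the 4 KB sector
 * shift: sector_base_addr = 0x01 and sector_end_addr = 0x1F + 1 = 0x20, so
 * flash_base_addr = 0x01 << 12 = 0x1000.  The region spans
 * (0x20 - 0x01) << 12 = 0x1F000 bytes; halving for the two NVM banks gives
 * 0xF800 bytes per bank, and dividing by sizeof(u16) leaves 0x7C00 (31744)
 * words in flash_bank_size.
 */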
392 
393 /**
394  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
395  *  @hw: pointer to the HW structure
396  *
397  *  Initialize family-specific MAC parameters and function
398  *  pointers.
399  **/
400 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
401 {
402 	struct e1000_mac_info *mac = &hw->mac;
403 	u16 pci_cfg;
404 
405 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
406 
407 	/* Set media type */
408 	hw->phy.media_type = e1000_media_type_copper;
409 
410 	/* Set mta register count */
411 	mac->mta_reg_count = 32;
412 	/* Set rar entry count */
413 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
414 	if (mac->type == e1000_ich8lan)
415 		mac->rar_entry_count--;
416 	/* Set if part includes ASF firmware */
417 	mac->asf_firmware_present = TRUE;
418 	/* Set if manageability features are enabled. */
419 	mac->arc_subsystem_valid = TRUE;
420 	/* Adaptive IFS supported */
421 	mac->adaptive_ifs = TRUE;
422 
423 	/* Function pointers */
424 
425 	/* bus type/speed/width */
426 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
427 	/* function id */
428 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
429 	/* reset */
430 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
431 	/* hw initialization */
432 	mac->ops.init_hw = e1000_init_hw_ich8lan;
433 	/* link setup */
434 	mac->ops.setup_link = e1000_setup_link_ich8lan;
435 	/* physical interface setup */
436 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
437 	/* check for link */
438 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
439 	/* check management mode */
440 	mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
441 	/* link info */
442 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
443 	/* multicast address update */
444 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
445 	/* setting MTA */
446 	mac->ops.mta_set = e1000_mta_set_generic;
447 	/* clear hardware counters */
448 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
449 
450 	/* LED operations */
451 	switch (mac->type) {
452 	case e1000_ich8lan:
453 	case e1000_ich9lan:
454 	case e1000_ich10lan:
455 		/* ID LED init */
456 		mac->ops.id_led_init = e1000_id_led_init_generic;
457 		/* blink LED */
458 		mac->ops.blink_led = e1000_blink_led_generic;
459 		/* setup LED */
460 		mac->ops.setup_led = e1000_setup_led_generic;
461 		/* cleanup LED */
462 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
463 		/* turn on/off LED */
464 		mac->ops.led_on = e1000_led_on_ich8lan;
465 		mac->ops.led_off = e1000_led_off_ich8lan;
466 		break;
467 	case e1000_pchlan:
468 		/* save PCH revision_id */
469 		e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
470 		hw->revision_id = (u8)(pci_cfg & 0x000F);
471 		/* ID LED init */
472 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
473 		/* setup LED */
474 		mac->ops.setup_led = e1000_setup_led_pchlan;
475 		/* cleanup LED */
476 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
477 		/* turn on/off LED */
478 		mac->ops.led_on = e1000_led_on_pchlan;
479 		mac->ops.led_off = e1000_led_off_pchlan;
480 		break;
481 	default:
482 		break;
483 	}
484 
485 	/* Enable PCS Lock-loss workaround for ICH8 */
486 	if (mac->type == e1000_ich8lan)
487 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
488 
489 	return E1000_SUCCESS;
490 }
491 
492 /**
493  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
494  *  @hw: pointer to the HW structure
495  *
496  *  Checks to see if the link status of the hardware has changed.  If a
497  *  change in link status has been detected, then we read the PHY registers
498  *  to get the current speed/duplex if link exists.
499  **/
500 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
501 {
502 	struct e1000_mac_info *mac = &hw->mac;
503 	s32 ret_val;
504 	bool link;
505 
506 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
507 
508 	/*
509 	 * We only want to go out to the PHY registers to see if Auto-Neg
510 	 * has completed and/or if our link status has changed.  The
511 	 * get_link_status flag is set upon receiving a Link Status
512 	 * Change or Rx Sequence Error interrupt.
513 	 */
514 	if (!mac->get_link_status) {
515 		ret_val = E1000_SUCCESS;
516 		goto out;
517 	}
518 
519 	/*
520 	 * First we want to see if the MII Status Register reports
521 	 * link.  If so, then we want to get the current speed/duplex
522 	 * of the PHY.
523 	 */
524 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
525 	if (ret_val)
526 		goto out;
527 
528 	if (hw->mac.type == e1000_pchlan) {
529 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
530 		if (ret_val)
531 			goto out;
532 	}
533 
534 	if (!link)
535 		goto out; /* No link detected */
536 
537 	mac->get_link_status = FALSE;
538 
539 	if (hw->phy.type == e1000_phy_82578) {
540 		ret_val = e1000_link_stall_workaround_hv(hw);
541 		if (ret_val)
542 			goto out;
543 	}
544 
545 	/*
546 	 * Check if there was DownShift, must be checked
547 	 * immediately after link-up
548 	 */
549 	e1000_check_downshift_generic(hw);
550 
551 	/*
552 	 * If we are forcing speed/duplex, then we simply return since
553 	 * we have already determined whether we have link or not.
554 	 */
555 	if (!mac->autoneg) {
556 		ret_val = -E1000_ERR_CONFIG;
557 		goto out;
558 	}
559 
560 	/*
561 	 * Auto-Neg is enabled.  Auto Speed Detection takes care
562 	 * of MAC speed/duplex configuration.  So we only need to
563 	 * configure Collision Distance in the MAC.
564 	 */
565 	e1000_config_collision_dist_generic(hw);
566 
567 	/*
568 	 * Configure Flow Control now that Auto-Neg has completed.
569 	 * First, we need to restore the desired flow control
570 	 * settings because we may have had to re-autoneg with a
571 	 * different link partner.
572 	 */
573 	ret_val = e1000_config_fc_after_link_up_generic(hw);
574 	if (ret_val)
575 		DEBUGOUT("Error configuring flow control\n");
576 
577 out:
578 	return ret_val;
579 }
580 
581 /**
582  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
583  *  @hw: pointer to the HW structure
584  *
585  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
586  **/
587 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
588 {
589 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
590 
591 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
592 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
593 	switch (hw->mac.type) {
594 	case e1000_ich8lan:
595 	case e1000_ich9lan:
596 	case e1000_ich10lan:
597 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
598 		break;
599 	case e1000_pchlan:
600 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
601 		break;
602 	default:
603 		break;
604 	}
605 }
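
/*
 * Usage sketch (editorial addition): once the init_params pointers above are
 * filled in, the shared-code setup path (e1000_setup_init_funcs() in
 * e1000_api.c) invokes them to populate the MAC, NVM and PHY parameters.
 * The hypothetical helper below only illustrates that calling order and the
 * usual error handling.
 */
#if 0
static s32 example_init_params_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;

	e1000_init_function_pointers_ich8lan(hw);

	ret_val = hw->mac.ops.init_params(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->nvm.ops.init_params(hw);
	if (ret_val)
		return ret_val;

	return hw->phy.ops.init_params(hw);
}
#endif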
606 
607 /**
608  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
609  *  @hw: pointer to the HW structure
610  *
611  *  Acquires the mutex for performing NVM operations.
612  **/
613 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
614 {
615 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
616 
617 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
618 
619 	return E1000_SUCCESS;
620 }
621 
622 /**
623  *  e1000_release_nvm_ich8lan - Release NVM mutex
624  *  @hw: pointer to the HW structure
625  *
626  *  Releases the mutex used while performing NVM operations.
627  **/
628 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
629 {
630 	DEBUGFUNC("e1000_release_nvm_ich8lan");
631 
632 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
633 
634 	return;
635 }
636 
637 /**
638  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
639  *  @hw: pointer to the HW structure
640  *
641  *  Acquires the software control flag for performing PHY and select
642  *  MAC CSR accesses.
643  **/
644 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
645 {
646 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
647 	s32 ret_val = E1000_SUCCESS;
648 
649 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
650 
651 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
652 
653 	while (timeout) {
654 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
655 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
656 			break;
657 
658 		msec_delay_irq(1);
659 		timeout--;
660 	}
661 
662 	if (!timeout) {
663 		DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
664 		ret_val = -E1000_ERR_CONFIG;
665 		goto out;
666 	}
667 
668 	timeout = SW_FLAG_TIMEOUT;
669 
670 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
671 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
672 
673 	while (timeout) {
674 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
675 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
676 			break;
677 
678 		msec_delay_irq(1);
679 		timeout--;
680 	}
681 
682 	if (!timeout) {
683 		DEBUGOUT("Failed to acquire the semaphore.\n");
684 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
685 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
686 		ret_val = -E1000_ERR_CONFIG;
687 		goto out;
688 	}
689 
690 out:
691 	if (ret_val)
692 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
693 
694 	return ret_val;
695 }
696 
697 /**
698  *  e1000_release_swflag_ich8lan - Release software control flag
699  *  @hw: pointer to the HW structure
700  *
701  *  Releases the software control flag for performing PHY and select
702  *  MAC CSR accesses.
703  **/
704 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
705 {
706 	u32 extcnf_ctrl;
707 
708 	DEBUGFUNC("e1000_release_swflag_ich8lan");
709 
710 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
711 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
712 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
713 
714 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
715 
716 	return;
717 }
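
/*
 * Usage sketch (editorial addition): the software control flag brackets PHY
 * and select MAC CSR accesses.  The *_locked PHY accessors assume the flag
 * is already held, so the usual pattern is acquire, locked access, release,
 * as in the hypothetical helper below (it assumes a PHY type for which
 * read_reg_locked has been populated).
 */
#if 0
static s32 example_read_phy_reg_under_swflag(struct e1000_hw *hw, u32 offset,
                                             u16 *data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, offset, data);

	hw->phy.ops.release(hw);

	return ret_val;
}
#endif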
718 
719 /**
720  *  e1000_check_mng_mode_ich8lan - Checks management mode
721  *  @hw: pointer to the HW structure
722  *
723  *  This checks if the adapter has manageability enabled.
724  *  This is a function pointer entry point only called by read/write
725  *  routines for the PHY and NVM parts.
726  **/
727 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
728 {
729 	u32 fwsm;
730 
731 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
732 
733 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
734 
735 	return (fwsm & E1000_FWSM_MODE_MASK) ==
736 	        (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
737 }
738 
739 /**
740  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
741  *  @hw: pointer to the HW structure
742  *
743  *  Checks if firmware is blocking the reset of the PHY.
744  *  This is a function pointer entry point only called by
745  *  reset routines.
746  **/
747 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
748 {
749 	u32 fwsm;
750 
751 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
752 
753 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
754 
755 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
756 	                                        : E1000_BLK_PHY_RESET;
757 }
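
/*
 * Usage sketch (editorial addition): reset paths gate PHY resets on the
 * indication returned above; a non-zero E1000_BLK_PHY_RESET means firmware
 * currently owns the PHY.  Hypothetical example:
 */
#if 0
static void example_reset_phy_if_allowed(struct e1000_hw *hw)
{
	if (!hw->phy.ops.check_reset_block(hw))
		hw->phy.ops.reset(hw);
}
#endif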
758 
759 /**
760  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
761  *  @hw:   pointer to the HW structure
762  *
763  *  SW should configure the LCD from the NVM extended configuration region
764  *  as a workaround for certain parts.
765  **/
766 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
767 {
768 	struct e1000_phy_info *phy = &hw->phy;
769 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
770 	s32 ret_val;
771 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
772 
773 	ret_val = hw->phy.ops.acquire(hw);
774 	if (ret_val)
775 		return ret_val;
776 
777 	/*
778 	 * Initialize the PHY from the NVM on ICH platforms.  This
779 	 * is needed due to an issue where the NVM configuration is
780 	 * not properly autoloaded after power transitions.
781 	 * Therefore, after each PHY reset, we will load the
782 	 * configuration data out of the NVM manually.
783 	 */
784 	if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
785 		(hw->mac.type == e1000_pchlan)) {
786 		/* Check if SW needs to configure the PHY */
787 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
788 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
789 		    (hw->mac.type == e1000_pchlan))
790 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
791 		else
792 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
793 
794 		data = E1000_READ_REG(hw, E1000_FEXTNVM);
795 		if (!(data & sw_cfg_mask))
796 			goto out;
797 
798 		/* Wait for basic configuration to complete before proceeding */
799 		e1000_lan_init_done_ich8lan(hw);
800 
801 		/*
802 		 * Make sure HW does not configure LCD from PHY
803 		 * extended configuration before SW configuration
804 		 */
805 		data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
806 		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
807 			goto out;
808 
809 		cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
810 		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
811 		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
812 		if (!cnf_size)
813 			goto out;
814 
815 		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
816 		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
817 
818 		if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
819 		    (hw->mac.type == e1000_pchlan)) {
820 			/*
821 			 * HW configures the SMBus address and LEDs when the
822 			 * OEM and LCD Write Enable bits are set in the NVM.
823 			 * When both NVM bits are cleared, SW will configure
824 			 * them instead.
825 			 */
826 			data = E1000_READ_REG(hw, E1000_STRAP);
827 			data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
828 			reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
829 			reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
830 			ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
831 			                                        reg_data);
832 			if (ret_val)
833 				goto out;
834 
835 			data = E1000_READ_REG(hw, E1000_LEDCTL);
836 			ret_val = e1000_write_phy_reg_hv_locked(hw,
837 			                                        HV_LED_CONFIG,
838 			                                        (u16)data);
839 			if (ret_val)
840 				goto out;
841 		}
842 
843 		/* Configure LCD from extended configuration region. */
844 
845 		/* cnf_base_addr is in DWORD */
846 		word_addr = (u16)(cnf_base_addr << 1);
847 
848 		for (i = 0; i < cnf_size; i++) {
849 			ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
850 			                           &reg_data);
851 			if (ret_val)
852 				goto out;
853 
854 			ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
855 			                           1, &reg_addr);
856 			if (ret_val)
857 				goto out;
858 
859 			/* Save off the PHY page for future writes. */
860 			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
861 				phy_page = reg_data;
862 				continue;
863 			}
864 
865 			reg_addr &= PHY_REG_MASK;
866 			reg_addr |= phy_page;
867 
868 			ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
869 			                                    reg_data);
870 			if (ret_val)
871 				goto out;
872 		}
873 	}
874 
875 out:
876 	hw->phy.ops.release(hw);
877 	return ret_val;
878 }
879 
880 /**
881  *  e1000_k1_gig_workaround_hv - K1 Si workaround
882  *  @hw:   pointer to the HW structure
883  *  @link: link up bool flag
884  *
885  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
886  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
887  *  If link is down, the function will restore the default K1 setting located
888  *  in the NVM.
889  **/
890 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
891 {
892 	s32 ret_val = E1000_SUCCESS;
893 	u16 status_reg = 0;
894 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
895 
896 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
897 
898 	if (hw->mac.type != e1000_pchlan)
899 		goto out;
900 
901 	/* Wrap the whole flow with the sw flag */
902 	ret_val = hw->phy.ops.acquire(hw);
903 	if (ret_val)
904 		goto out;
905 
906 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
907 	if (link) {
908 		if (hw->phy.type == e1000_phy_82578) {
909 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
910 			                                      &status_reg);
911 			if (ret_val)
912 				goto release;
913 
914 			status_reg &= BM_CS_STATUS_LINK_UP |
915 			              BM_CS_STATUS_RESOLVED |
916 			              BM_CS_STATUS_SPEED_MASK;
917 
918 			if (status_reg == (BM_CS_STATUS_LINK_UP |
919 			                   BM_CS_STATUS_RESOLVED |
920 			                   BM_CS_STATUS_SPEED_1000))
921 				k1_enable = FALSE;
922 		}
923 
924 		if (hw->phy.type == e1000_phy_82577) {
925 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
926 			                                      &status_reg);
927 			if (ret_val)
928 				goto release;
929 
930 			status_reg &= HV_M_STATUS_LINK_UP |
931 			              HV_M_STATUS_AUTONEG_COMPLETE |
932 			              HV_M_STATUS_SPEED_MASK;
933 
934 			if (status_reg == (HV_M_STATUS_LINK_UP |
935 			                   HV_M_STATUS_AUTONEG_COMPLETE |
936 			                   HV_M_STATUS_SPEED_1000))
937 				k1_enable = FALSE;
938 		}
939 
940 		/* Link stall fix for link up */
941 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
942 		                                       0x0100);
943 		if (ret_val)
944 			goto release;
945 
946 	} else {
947 		/* Link stall fix for link down */
948 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
949 		                                       0x4100);
950 		if (ret_val)
951 			goto release;
952 	}
953 
954 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
955 
956 release:
957 	hw->phy.ops.release(hw);
958 out:
959 	return ret_val;
960 }
961 
962 /**
963  *  e1000_configure_k1_ich8lan - Configure K1 power state
964  *  @hw: pointer to the HW structure
965  *  @k1_enable: K1 state to configure
966  *
967  *  Configure the K1 power state based on the provided parameter.
968  *  Assumes semaphore already acquired.
969  *
970  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
971  **/
972 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
973 {
974 	s32 ret_val = E1000_SUCCESS;
975 	u32 ctrl_reg = 0;
976 	u32 ctrl_ext = 0;
977 	u32 reg = 0;
978 	u16 kmrn_reg = 0;
979 
980 	ret_val = e1000_read_kmrn_reg_locked(hw,
981 	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
982 	                                     &kmrn_reg);
983 	if (ret_val)
984 		goto out;
985 
986 	if (k1_enable)
987 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
988 	else
989 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
990 
991 	ret_val = e1000_write_kmrn_reg_locked(hw,
992 	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
993 	                                      kmrn_reg);
994 	if (ret_val)
995 		goto out;
996 
997 	usec_delay(20);
998 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
999 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1000 
1001 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1002 	reg |= E1000_CTRL_FRCSPD;
1003 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1004 
1005 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1006 	usec_delay(20);
1007 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1008 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1009 	usec_delay(20);
1010 
1011 out:
1012 	return ret_val;
1013 }
1014 
1015 /**
1016  *  e1000_oem_bits_config_ich8lan - Configure OEM bits (GbE Disable and LPLU)
1017  *  @hw:       pointer to the HW structure
1018  *  @d0_state: TRUE if configuring for the D0 power state, FALSE for non-D0 (Dx)
1019  *
1020  *  SW will configure GbE Disable and LPLU based on the NVM. The four bits are
1021  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1022  *  in NVM determine whether HW should configure LPLU and GbE Disable.
1023  **/
1024 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1025 {
1026 	s32 ret_val = 0;
1027 	u32 mac_reg;
1028 	u16 oem_reg;
1029 
1030 	if (hw->mac.type != e1000_pchlan)
1031 		return ret_val;
1032 
1033 	ret_val = hw->phy.ops.acquire(hw);
1034 	if (ret_val)
1035 		return ret_val;
1036 
1037 	mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1038 	if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1039 		goto out;
1040 
1041 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1042 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1043 		goto out;
1044 
1045 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1046 
1047 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1048 	if (ret_val)
1049 		goto out;
1050 
1051 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1052 
1053 	if (d0_state) {
1054 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1055 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1056 
1057 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1058 			oem_reg |= HV_OEM_BITS_LPLU;
1059 	} else {
1060 		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1061 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1062 
1063 		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1064 			oem_reg |= HV_OEM_BITS_LPLU;
1065 	}
1066 	/* Restart auto-neg to activate the bits */
1067 	if (!hw->phy.ops.check_reset_block(hw))
1068 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1069 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1070 
1071 out:
1072 	hw->phy.ops.release(hw);
1073 
1074 	return ret_val;
1075 }
1076 
1077 
1078 /**
1079  *  e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1080  *  @hw: pointer to the HW structure
1081  **/
1082 s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1083 {
1084 	if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1085 		return E1000_SUCCESS;
1086 
1087 	return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
1088 }
1089 
1090 /**
1091  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1092  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
1093  **/
1094 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1095 {
1096 	s32 ret_val = E1000_SUCCESS;
1097 
1098 	if (hw->mac.type != e1000_pchlan)
1099 		goto out;
1100 
1101 	/* Hanksville M Phy init for IEEE. */
1102 	if ((hw->revision_id == 2) &&
1103 	    (hw->phy.type == e1000_phy_82577) &&
1104 	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1105 		hw->phy.ops.write_reg(hw, 0x10, 0x8823);
1106 		hw->phy.ops.write_reg(hw, 0x11, 0x0018);
1107 		hw->phy.ops.write_reg(hw, 0x10, 0x8824);
1108 		hw->phy.ops.write_reg(hw, 0x11, 0x0016);
1109 		hw->phy.ops.write_reg(hw, 0x10, 0x8825);
1110 		hw->phy.ops.write_reg(hw, 0x11, 0x001A);
1111 		hw->phy.ops.write_reg(hw, 0x10, 0x888C);
1112 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1113 		hw->phy.ops.write_reg(hw, 0x10, 0x888D);
1114 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1115 		hw->phy.ops.write_reg(hw, 0x10, 0x888E);
1116 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1117 		hw->phy.ops.write_reg(hw, 0x10, 0x8827);
1118 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1119 		hw->phy.ops.write_reg(hw, 0x10, 0x8835);
1120 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1121 		hw->phy.ops.write_reg(hw, 0x10, 0x8834);
1122 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1123 		hw->phy.ops.write_reg(hw, 0x10, 0x8833);
1124 		hw->phy.ops.write_reg(hw, 0x11, 0x0002);
1125 	}
1126 
1127 	if (((hw->phy.type == e1000_phy_82577) &&
1128 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1129 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1130 		/* Disable generation of early preamble */
1131 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1132 		if (ret_val)
1133 			goto out;
1134 
1135 		/* Preamble tuning for SSC */
1136 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1137 		if (ret_val)
1138 			goto out;
1139 	}
1140 
1141 	if (hw->phy.type == e1000_phy_82578) {
1142 		if (hw->revision_id < 3) {
1143 			/* PHY config */
1144 			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
1145 			                                0x66C0);
1146 			if (ret_val)
1147 				goto out;
1148 
1149 			/* PHY config */
1150 			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
1151 			                                0xFFFF);
1152 			if (ret_val)
1153 				goto out;
1154 		}
1155 
1156 		/*
1157 		 * Return registers to default by doing a soft reset then
1158 		 * writing 0x3140 to the control register.
1159 		 */
1160 		if (hw->phy.revision < 2) {
1161 			e1000_phy_sw_reset_generic(hw);
1162 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1163 			                                0x3140);
1164 		}
1165 	}
1166 
1167 	if ((hw->revision_id == 2) &&
1168 	    (hw->phy.type == e1000_phy_82577) &&
1169 	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1170 		/*
1171 		 * Workaround for OEM (GbE) not operating after reset -
1172 		 * restart AN (twice)
1173 		 */
1174 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1175 		if (ret_val)
1176 			goto out;
1177 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1178 		if (ret_val)
1179 			goto out;
1180 	}
1181 
1182 	/* Select page 0 */
1183 	ret_val = hw->phy.ops.acquire(hw);
1184 	if (ret_val)
1185 		goto out;
1186 
1187 	hw->phy.addr = 1;
1188 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1189 	if (ret_val)
1190 		goto out;
1191 	hw->phy.ops.release(hw);
1192 
1193 	/*
1194 	 * Configure the K1 Si workaround during phy reset assuming there is
1195 	 * link so that it disables K1 if link is in 1Gbps.
1196 	 */
1197 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1198 
1199 out:
1200 	return ret_val;
1201 }
1202 
1203 /**
1204  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
1205  *  @hw: pointer to the HW structure
1206  *
1207  *  Check the appropriate indication the MAC has finished configuring the
1208  *  PHY after a software reset.
1209  **/
1210 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1211 {
1212 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1213 
1214 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
1215 
1216 	/* Wait for basic configuration to complete before proceeding */
1217 	do {
1218 		data = E1000_READ_REG(hw, E1000_STATUS);
1219 		data &= E1000_STATUS_LAN_INIT_DONE;
1220 		usec_delay(100);
1221 	} while ((!data) && --loop);
1222 
1223 	/*
1224 	 * If basic configuration is incomplete before the above loop
1225 	 * count reaches 0, loading the configuration from NVM will
1226 	 * leave the PHY in a bad state possibly resulting in no link.
1227 	 */
1228 	if (loop == 0)
1229 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1230 
1231 	/* Clear the Init Done bit for the next init event */
1232 	data = E1000_READ_REG(hw, E1000_STATUS);
1233 	data &= ~E1000_STATUS_LAN_INIT_DONE;
1234 	E1000_WRITE_REG(hw, E1000_STATUS, data);
1235 }
1236 
1237 /**
1238  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1239  *  @hw: pointer to the HW structure
1240  *
1241  *  Resets the PHY
1242  *  This is a function pointer entry point called by drivers
1243  *  or other shared routines.
1244  **/
1245 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1246 {
1247 	s32 ret_val = E1000_SUCCESS;
1248 	u16 reg;
1249 
1250 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1251 
1252 	ret_val = e1000_phy_hw_reset_generic(hw);
1253 	if (ret_val)
1254 		goto out;
1255 
1256 	/* Allow time for h/w to get to a quiescent state after reset */
1257 	msec_delay(10);
1258 
1259 	if (hw->mac.type == e1000_pchlan) {
1260 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1261 		if (ret_val)
1262 			goto out;
1263 	}
1264 
1265 	/* Dummy read to clear the phy wakeup bit after lcd reset */
1266 	if (hw->mac.type == e1000_pchlan)
1267 		hw->phy.ops.read_reg(hw, BM_WUC, &reg);
1268 
1269 	/* Configure the LCD with the extended configuration region in NVM */
1270 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
1271 	if (ret_val)
1272 		goto out;
1273 
1274 	/* Configure the LCD with the OEM bits in NVM */
1275 	if (hw->mac.type == e1000_pchlan)
1276 		ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1277 
1278 out:
1279 	return ret_val;
1280 }
1281 
1282 /**
1283  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1284  *  @hw: pointer to the HW structure
1285  *  @active: TRUE to enable LPLU, FALSE to disable
1286  *
1287  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
1288  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
1289  *  the PHY speed. This function will manually set the LPLU bit and restart
1290  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
1291  *  since it configures the same bit.
1292  **/
1293 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1294 {
1295 	s32 ret_val = E1000_SUCCESS;
1296 	u16 oem_reg;
1297 
1298 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
1299 
1300 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1301 	if (ret_val)
1302 		goto out;
1303 
1304 	if (active)
1305 		oem_reg |= HV_OEM_BITS_LPLU;
1306 	else
1307 		oem_reg &= ~HV_OEM_BITS_LPLU;
1308 
1309 	oem_reg |= HV_OEM_BITS_RESTART_AN;
1310 	ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1311 
1312 out:
1313 	return ret_val;
1314 }
1315 
1316 /**
1317  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1318  *  @hw: pointer to the HW structure
1319  *  @active: TRUE to enable LPLU, FALSE to disable
1320  *
1321  *  Sets the LPLU D0 state according to the active flag.  When
1322  *  activating LPLU this function also disables smart speed
1323  *  and vice versa.  LPLU will not be activated unless the
1324  *  device autonegotiation advertisement meets standards of
1325  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
1326  *  This is a function pointer entry point only called by
1327  *  PHY setup routines.
1328  **/
1329 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1330 {
1331 	struct e1000_phy_info *phy = &hw->phy;
1332 	u32 phy_ctrl;
1333 	s32 ret_val = E1000_SUCCESS;
1334 	u16 data;
1335 
1336 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
1337 
1338 	if (phy->type == e1000_phy_ife)
1339 		goto out;
1340 
1341 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
1342 
1343 	if (active) {
1344 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
1345 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1346 
1347 		if (phy->type != e1000_phy_igp_3)
1348 			goto out;
1349 
1350 		/*
1351 		 * Call gig speed drop workaround on LPLU before accessing
1352 		 * any PHY registers
1353 		 */
1354 		if (hw->mac.type == e1000_ich8lan)
1355 			e1000_gig_downshift_workaround_ich8lan(hw);
1356 
1357 		/* When LPLU is enabled, we should disable SmartSpeed */
1358 		ret_val = phy->ops.read_reg(hw,
1359 		                            IGP01E1000_PHY_PORT_CONFIG,
1360 		                            &data);
		if (ret_val)
			goto out;
1361 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1362 		ret_val = phy->ops.write_reg(hw,
1363 		                             IGP01E1000_PHY_PORT_CONFIG,
1364 		                             data);
1365 		if (ret_val)
1366 			goto out;
1367 	} else {
1368 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
1369 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1370 
1371 		if (phy->type != e1000_phy_igp_3)
1372 			goto out;
1373 
1374 		/*
1375 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1376 		 * during Dx states where the power conservation is most
1377 		 * important.  During driver activity we should enable
1378 		 * SmartSpeed, so performance is maintained.
1379 		 */
1380 		if (phy->smart_speed == e1000_smart_speed_on) {
1381 			ret_val = phy->ops.read_reg(hw,
1382 			                            IGP01E1000_PHY_PORT_CONFIG,
1383 			                            &data);
1384 			if (ret_val)
1385 				goto out;
1386 
1387 			data |= IGP01E1000_PSCFR_SMART_SPEED;
1388 			ret_val = phy->ops.write_reg(hw,
1389 			                             IGP01E1000_PHY_PORT_CONFIG,
1390 			                             data);
1391 			if (ret_val)
1392 				goto out;
1393 		} else if (phy->smart_speed == e1000_smart_speed_off) {
1394 			ret_val = phy->ops.read_reg(hw,
1395 			                            IGP01E1000_PHY_PORT_CONFIG,
1396 			                            &data);
1397 			if (ret_val)
1398 				goto out;
1399 
1400 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1401 			ret_val = phy->ops.write_reg(hw,
1402 			                             IGP01E1000_PHY_PORT_CONFIG,
1403 			                             data);
1404 			if (ret_val)
1405 				goto out;
1406 		}
1407 	}
1408 
1409 out:
1410 	return ret_val;
1411 }
1412 
1413 /**
1414  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
1415  *  @hw: pointer to the HW structure
1416  *  @active: TRUE to enable LPLU, FALSE to disable
1417  *
1418  *  Sets the LPLU D3 state according to the active flag.  When
1419  *  activating LPLU this function also disables smart speed
1420  *  and vice versa.  LPLU will not be activated unless the
1421  *  device autonegotiation advertisement meets standards of
1422  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
1423  *  This is a function pointer entry point only called by
1424  *  PHY setup routines.
1425  **/
1426 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1427 {
1428 	struct e1000_phy_info *phy = &hw->phy;
1429 	u32 phy_ctrl;
1430 	s32 ret_val = E1000_SUCCESS;
1431 	u16 data;
1432 
1433 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
1434 
1435 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
1436 
1437 	if (!active) {
1438 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
1439 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1440 
1441 		if (phy->type != e1000_phy_igp_3)
1442 			goto out;
1443 
1444 		/*
1445 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1446 		 * during Dx states where the power conservation is most
1447 		 * important.  During driver activity we should enable
1448 		 * SmartSpeed, so performance is maintained.
1449 		 */
1450 		if (phy->smart_speed == e1000_smart_speed_on) {
1451 			ret_val = phy->ops.read_reg(hw,
1452 			                            IGP01E1000_PHY_PORT_CONFIG,
1453 			                            &data);
1454 			if (ret_val)
1455 				goto out;
1456 
1457 			data |= IGP01E1000_PSCFR_SMART_SPEED;
1458 			ret_val = phy->ops.write_reg(hw,
1459 			                             IGP01E1000_PHY_PORT_CONFIG,
1460 			                             data);
1461 			if (ret_val)
1462 				goto out;
1463 		} else if (phy->smart_speed == e1000_smart_speed_off) {
1464 			ret_val = phy->ops.read_reg(hw,
1465 			                            IGP01E1000_PHY_PORT_CONFIG,
1466 			                            &data);
1467 			if (ret_val)
1468 				goto out;
1469 
1470 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1471 			ret_val = phy->ops.write_reg(hw,
1472 			                             IGP01E1000_PHY_PORT_CONFIG,
1473 			                             data);
1474 			if (ret_val)
1475 				goto out;
1476 		}
1477 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1478 	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1479 	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1480 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
1481 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1482 
1483 		if (phy->type != e1000_phy_igp_3)
1484 			goto out;
1485 
1486 		/*
1487 		 * Call gig speed drop workaround on LPLU before accessing
1488 		 * any PHY registers
1489 		 */
1490 		if (hw->mac.type == e1000_ich8lan)
1491 			e1000_gig_downshift_workaround_ich8lan(hw);
1492 
1493 		/* When LPLU is enabled, we should disable SmartSpeed */
1494 		ret_val = phy->ops.read_reg(hw,
1495 		                            IGP01E1000_PHY_PORT_CONFIG,
1496 		                            &data);
1497 		if (ret_val)
1498 			goto out;
1499 
1500 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1501 		ret_val = phy->ops.write_reg(hw,
1502 		                             IGP01E1000_PHY_PORT_CONFIG,
1503 		                             data);
1504 	}
1505 
1506 out:
1507 	return ret_val;
1508 }
1509 
1510 /**
1511  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
1512  *  @hw: pointer to the HW structure
1513  *  @bank:  pointer to the variable that returns the active bank
1514  *
1515  *  Reads signature byte from the NVM using the flash access registers.
1516  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
1517  **/
1518 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
1519 {
1520 	u32 eecd;
1521 	struct e1000_nvm_info *nvm = &hw->nvm;
1522 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
1523 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
1524 	u8 sig_byte = 0;
1525 	s32 ret_val = E1000_SUCCESS;
1526 
1527 	switch (hw->mac.type) {
1528 	case e1000_ich8lan:
1529 	case e1000_ich9lan:
1530 		eecd = E1000_READ_REG(hw, E1000_EECD);
1531 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
1532 		    E1000_EECD_SEC1VAL_VALID_MASK) {
1533 			if (eecd & E1000_EECD_SEC1VAL)
1534 				*bank = 1;
1535 			else
1536 				*bank = 0;
1537 
1538 			goto out;
1539 		}
1540 		DEBUGOUT("Unable to determine valid NVM bank via EEC - "
1541 		         "reading flash signature\n");
1542 		/* fall-thru */
1543 	default:
1544 		/* set bank to 0 in case flash read fails */
1545 		*bank = 0;
1546 
1547 		/* Check bank 0 */
1548 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
1549 		                                        &sig_byte);
1550 		if (ret_val)
1551 			goto out;
1552 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
1553 		    E1000_ICH_NVM_SIG_VALUE) {
1554 			*bank = 0;
1555 			goto out;
1556 		}
1557 
1558 		/* Check bank 1 */
1559 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
1560 		                                        bank1_offset,
1561 		                                        &sig_byte);
1562 		if (ret_val)
1563 			goto out;
1564 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
1565 		    E1000_ICH_NVM_SIG_VALUE) {
1566 			*bank = 1;
1567 			goto out;
1568 		}
1569 
1570 		DEBUGOUT("ERROR: No valid NVM bank present\n");
1571 		ret_val = -E1000_ERR_NVM;
1572 		break;
1573 	}
1574 out:
1575 	return ret_val;
1576 }
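
/*
 * Worked example (editorial addition) of the offsets used above: with the
 * signature word at 0x13 (per the comment preceding this function),
 * act_offset = 0x13 * 2 + 1 = 0x27 addresses the high byte of that word in
 * bank 0, and the bank 1 copy sits bank1_offset = flash_bank_size *
 * sizeof(u16) bytes further into the flash region.
 */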
1577 
1578 /**
1579  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
1580  *  @hw: pointer to the HW structure
1581  *  @offset: The offset (in bytes) of the word(s) to read.
1582  *  @words: Size of data to read in words
1583  *  @data: Pointer to the word(s) to read at offset.
1584  *
1585  *  Reads a word(s) from the NVM using the flash access registers.
1586  **/
1587 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1588                                   u16 *data)
1589 {
1590 	struct e1000_nvm_info *nvm = &hw->nvm;
1591 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1592 	u32 act_offset;
1593 	s32 ret_val = E1000_SUCCESS;
1594 	u32 bank = 0;
1595 	u16 i, word;
1596 
1597 	DEBUGFUNC("e1000_read_nvm_ich8lan");
1598 
1599 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1600 	    (words == 0)) {
1601 		DEBUGOUT("nvm parameter(s) out of bounds\n");
1602 		ret_val = -E1000_ERR_NVM;
1603 		goto out;
1604 	}
1605 
1606 	nvm->ops.acquire(hw);
1607 
1608 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1609 	if (ret_val != E1000_SUCCESS) {
1610 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
1611 		bank = 0;
1612 	}
1613 
1614 	act_offset = (bank) ? nvm->flash_bank_size : 0;
1615 	act_offset += offset;
1616 
1617 	ret_val = E1000_SUCCESS;
1618 	for (i = 0; i < words; i++) {
1619 		if ((dev_spec->shadow_ram) &&
1620 		    (dev_spec->shadow_ram[offset+i].modified)) {
1621 			data[i] = dev_spec->shadow_ram[offset+i].value;
1622 		} else {
1623 			ret_val = e1000_read_flash_word_ich8lan(hw,
1624 			                                        act_offset + i,
1625 			                                        &word);
1626 			if (ret_val)
1627 				break;
1628 			data[i] = word;
1629 		}
1630 	}
1631 
1632 	nvm->ops.release(hw);
1633 
1634 out:
1635 	if (ret_val)
1636 		DEBUGOUT1("NVM read error: %d\n", ret_val);
1637 
1638 	return ret_val;
1639 }
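
/*
 * Usage sketch (hypothetical caller, kept under #if 0 and not compiled):
 * NVM reads normally go through the nvm ops table, which dispatches to the
 * routine above on ICH/PCH parts.  The helper name below is illustrative
 * only.
 */
#if 0
static s32 e1000_example_read_led_word(struct e1000_hw *hw, u16 *word)
{
	/* NVM_ID_LED_SETTINGS is the same word read by valid_led_default */
	return hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, word);
}
#endif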
1640 
1641 /**
1642  *  e1000_flash_cycle_init_ich8lan - Initialize flash
1643  *  @hw: pointer to the HW structure
1644  *
1645  *  This function does initial flash setup so that a new read/write/erase cycle
1646  *  can be started.
1647  **/
1648 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1649 {
1650 	union ich8_hws_flash_status hsfsts;
1651 	s32 ret_val = -E1000_ERR_NVM;
1652 	s32 i = 0;
1653 
1654 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
1655 
1656 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
1657 
1658 	/* Check if the flash descriptor is valid */
1659 	if (hsfsts.hsf_status.fldesvalid == 0) {
1660 		DEBUGOUT("Flash descriptor invalid.  "
1661 		         "SW Sequencing must be used.");
1662 		goto out;
1663 	}
1664 
1665 	/* Clear FCERR and DAEL in hw status by writing 1 */
1666 	hsfsts.hsf_status.flcerr = 1;
1667 	hsfsts.hsf_status.dael = 1;
1668 
1669 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
1670 
1671 	/*
1672 	 * Either we should have a hardware SPI cycle in progress
1673 	 * bit to check against, in order to start a new cycle or
1674 	 * FDONE bit should be changed in the hardware so that it
1675 	 * is 1 after hardware reset, which can then be used as an
1676 	 * indication whether a cycle is in progress or has been
1677 	 * completed.
1678 	 */
1679 
1680 	if (hsfsts.hsf_status.flcinprog == 0) {
1681 		/*
1682 		 * There is no cycle running at present,
1683 		 * so we can start a cycle.
1684 		 * Begin by setting Flash Cycle Done.
1685 		 */
1686 		hsfsts.hsf_status.flcdone = 1;
1687 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
1688 		ret_val = E1000_SUCCESS;
1689 	} else {
1690 		/*
1691 		 * Otherwise poll for some time so the current
1692 		 * cycle has a chance to end before giving up.
1693 		 */
1694 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
1695 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
1696 			                                      ICH_FLASH_HSFSTS);
1697 			if (hsfsts.hsf_status.flcinprog == 0) {
1698 				ret_val = E1000_SUCCESS;
1699 				break;
1700 			}
1701 			usec_delay(1);
1702 		}
1703 		if (ret_val == E1000_SUCCESS) {
1704 			/*
1705 			 * The previous cycle completed before the timeout
1706 			 * expired, so set the Flash Cycle Done bit.
1707 			 */
1708 			hsfsts.hsf_status.flcdone = 1;
1709 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
1710 			                        hsfsts.regval);
1711 		} else {
1712 			DEBUGOUT("Flash controller busy, cannot get access");
1713 		}
1714 	}
1715 
1716 out:
1717 	return ret_val;
1718 }
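
/*
 * Note: the flash read, write and erase paths below all follow the same
 * sequence: e1000_flash_cycle_init_ich8lan(), program HSFCTL (cycle type
 * and byte count), program FADDR, then e1000_flash_cycle_ich8lan() to
 * start the cycle and poll for FDONE.
 */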
1719 
1720 /**
1721  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
1722  *  @hw: pointer to the HW structure
1723  *  @timeout: maximum time to wait for completion
1724  *
1725  *  This function starts a flash cycle and waits for its completion.
1726  **/
1727 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
1728 {
1729 	union ich8_hws_flash_ctrl hsflctl;
1730 	union ich8_hws_flash_status hsfsts;
1731 	s32 ret_val = -E1000_ERR_NVM;
1732 	u32 i = 0;
1733 
1734 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
1735 
1736 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
1737 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
1738 	hsflctl.hsf_ctrl.flcgo = 1;
1739 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
1740 
1741 	/* wait till FDONE bit is set to 1 */
1742 	do {
1743 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
1744 		if (hsfsts.hsf_status.flcdone == 1)
1745 			break;
1746 		usec_delay(1);
1747 	} while (i++ < timeout);
1748 
1749 	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
1750 		ret_val = E1000_SUCCESS;
1751 
1752 	return ret_val;
1753 }
1754 
1755 /**
1756  *  e1000_read_flash_word_ich8lan - Read word from flash
1757  *  @hw: pointer to the HW structure
1758  *  @offset: offset to data location
1759  *  @data: pointer to the location for storing the data
1760  *
1761  *  Reads the flash word at offset into data.  Offset is converted
1762  *  to bytes before read.
1763  **/
1764 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
1765                                          u16 *data)
1766 {
1767 	s32 ret_val;
1768 
1769 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
1770 
1771 	if (!data) {
1772 		ret_val = -E1000_ERR_NVM;
1773 		goto out;
1774 	}
1775 
1776 	/* Must convert offset into bytes. */
1777 	offset <<= 1;
1778 
1779 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
1780 
1781 out:
1782 	return ret_val;
1783 }
1784 
1785 /**
1786  *  e1000_read_flash_byte_ich8lan - Read byte from flash
1787  *  @hw: pointer to the HW structure
1788  *  @offset: The offset of the byte to read.
1789  *  @data: Pointer to a byte to store the value read.
1790  *
1791  *  Reads a single byte from the NVM using the flash access registers.
1792  **/
1793 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
1794                                          u8 *data)
1795 {
1796 	s32 ret_val = E1000_SUCCESS;
1797 	u16 word = 0;
1798 
1799 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
1800 	if (ret_val)
1801 		goto out;
1802 
1803 	*data = (u8)word;
1804 
1805 out:
1806 	return ret_val;
1807 }
1808 
1809 /**
1810  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
1811  *  @hw: pointer to the HW structure
1812  *  @offset: The offset (in bytes) of the byte or word to read.
1813  *  @size: Size of data to read, 1=byte 2=word
1814  *  @data: Pointer to the word to store the value read.
1815  *
1816  *  Reads a byte or word from the NVM using the flash access registers.
1817  **/
1818 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1819                                          u8 size, u16 *data)
1820 {
1821 	union ich8_hws_flash_status hsfsts;
1822 	union ich8_hws_flash_ctrl hsflctl;
1823 	u32 flash_linear_addr;
1824 	u32 flash_data = 0;
1825 	s32 ret_val = -E1000_ERR_NVM;
1826 	u8 count = 0;
1827 
1828 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
1829 
1830 	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
1831 		goto out;
1832 
1833 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
1834 	                    hw->nvm.flash_base_addr;
1835 
1836 	do {
1837 		usec_delay(1);
1838 		/* Steps */
1839 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
1840 		if (ret_val != E1000_SUCCESS)
1841 			break;
1842 
1843 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
1844 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
1845 		hsflctl.hsf_ctrl.fldbcount = size - 1;
1846 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
1847 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
1848 
1849 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
1850 
1851 		ret_val = e1000_flash_cycle_ich8lan(hw,
1852 		                                ICH_FLASH_READ_COMMAND_TIMEOUT);
1853 
1854 		/*
1855 		 * If FCERR is set, clear it and retry the whole
1856 		 * sequence a few more times; otherwise read the
1857 		 * Flash Data0 register (least significant byte
1858 		 * first).
1859 		 */
1860 		if (ret_val == E1000_SUCCESS) {
1861 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
1862 			if (size == 1)
1863 				*data = (u8)(flash_data & 0x000000FF);
1864 			else if (size == 2)
1865 				*data = (u16)(flash_data & 0x0000FFFF);
1866 			break;
1867 		} else {
1868 			/*
1869 			 * If we've gotten here, then things are probably
1870 			 * completely hosed, but if the error condition is
1871 			 * detected, it won't hurt to give it another try...
1872 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
1873 			 */
1874 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
1875 			                                      ICH_FLASH_HSFSTS);
1876 			if (hsfsts.hsf_status.flcerr == 1) {
1877 				/* Repeat for some time before giving up. */
1878 				continue;
1879 			} else if (hsfsts.hsf_status.flcdone == 0) {
1880 				DEBUGOUT("Timeout error - flash cycle "
1881 				         "did not complete.");
1882 				break;
1883 			}
1884 		}
1885 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
1886 
1887 out:
1888 	return ret_val;
1889 }
1890 
1891 /**
1892  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
1893  *  @hw: pointer to the HW structure
1894  *  @offset: The offset (in bytes) of the word(s) to write.
1895  *  @words: Size of data to write in words
1896  *  @data: Pointer to the word(s) to write at offset.
1897  *
1898  *  Writes a byte or word to the NVM using the flash access registers.
1899  **/
1900 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1901                                    u16 *data)
1902 {
1903 	struct e1000_nvm_info *nvm = &hw->nvm;
1904 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1905 	s32 ret_val = E1000_SUCCESS;
1906 	u16 i;
1907 
1908 	DEBUGFUNC("e1000_write_nvm_ich8lan");
1909 
1910 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1911 	    (words == 0)) {
1912 		DEBUGOUT("nvm parameter(s) out of bounds\n");
1913 		ret_val = -E1000_ERR_NVM;
1914 		goto out;
1915 	}
1916 
1917 	nvm->ops.acquire(hw);
1918 
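	/*
	 * Writes are only staged in the shadow RAM here; the modified words
	 * are committed to the flash by e1000_update_nvm_checksum_ich8lan().
	 */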
1919 	for (i = 0; i < words; i++) {
1920 		dev_spec->shadow_ram[offset+i].modified = TRUE;
1921 		dev_spec->shadow_ram[offset+i].value = data[i];
1922 	}
1923 
1924 	nvm->ops.release(hw);
1925 
1926 out:
1927 	return ret_val;
1928 }
1929 
1930 /**
1931  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
1932  *  @hw: pointer to the HW structure
1933  *
1934  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
1935  *  which writes the checksum to the shadow ram.  The changes in the shadow
1936  *  ram are then committed to the EEPROM by processing each bank at a time
1937  *  checking for the modified bit and writing only the pending changes.
1938  *  After a successful commit, the shadow ram is cleared and is ready for
1939  *  future writes.
1940  **/
1941 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1942 {
1943 	struct e1000_nvm_info *nvm = &hw->nvm;
1944 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1945 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
1946 	s32 ret_val;
1947 	u16 data;
1948 
1949 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
1950 
1951 	ret_val = e1000_update_nvm_checksum_generic(hw);
1952 	if (ret_val)
1953 		goto out;
1954 
1955 	if (nvm->type != e1000_nvm_flash_sw)
1956 		goto out;
1957 
1958 	nvm->ops.acquire(hw);
1959 
1960 	/*
1961 	 * We're writing to the opposite bank so if we're on bank 1,
1962 	 * write to bank 0 etc.  We also need to erase the segment that
1963 	 * is going to be written
1964 	 */
1965 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1966 	if (ret_val != E1000_SUCCESS) {
1967 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
1968 		bank = 0;
1969 	}
1970 
1971 	if (bank == 0) {
1972 		new_bank_offset = nvm->flash_bank_size;
1973 		old_bank_offset = 0;
1974 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
1975 		if (ret_val) {
1976 			nvm->ops.release(hw);
1977 			goto out;
1978 		}
1979 	} else {
1980 		old_bank_offset = nvm->flash_bank_size;
1981 		new_bank_offset = 0;
1982 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
1983 		if (ret_val) {
1984 			nvm->ops.release(hw);
1985 			goto out;
1986 		}
1987 	}
1988 
1989 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
1990 		/*
1991 		 * Determine whether to write the value stored
1992 		 * in the other NVM bank or a modified value stored
1993 		 * in the shadow RAM
1994 		 */
1995 		if (dev_spec->shadow_ram[i].modified) {
1996 			data = dev_spec->shadow_ram[i].value;
1997 		} else {
1998 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
1999 			                                        old_bank_offset,
2000 			                                        &data);
2001 			if (ret_val)
2002 				break;
2003 		}
2004 
2005 		/*
2006 		 * If the word is 0x13, then make sure the signature bits
2007 		 * (15:14) are 11b until the commit has completed.
2008 		 * This will allow us to write 10b which indicates the
2009 		 * signature is valid.  We want to do this after the write
2010 		 * has completed so that we don't mark the segment valid
2011 		 * while the write is still in progress
2012 		 */
2013 		if (i == E1000_ICH_NVM_SIG_WORD)
2014 			data |= E1000_ICH_NVM_SIG_MASK;
2015 
2016 		/* Convert offset to bytes. */
2017 		act_offset = (i + new_bank_offset) << 1;
2018 
2019 		usec_delay(100);
2020 		/* Write the bytes to the new bank. */
2021 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2022 		                                               act_offset,
2023 		                                               (u8)data);
2024 		if (ret_val)
2025 			break;
2026 
2027 		usec_delay(100);
2028 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2029 		                                          act_offset + 1,
2030 		                                          (u8)(data >> 8));
2031 		if (ret_val)
2032 			break;
2033 	}
2034 
2035 	/*
2036 	 * Don't bother writing the segment valid bits if sector
2037 	 * programming failed.
2038 	 */
2039 	if (ret_val) {
2040 		DEBUGOUT("Flash commit failed.\n");
2041 		nvm->ops.release(hw);
2042 		goto out;
2043 	}
2044 
2045 	/*
2046 	 * Finally, validate the new segment by setting bits 15:14
2047 	 * of word 0x13 to 10b.  This can be done without an erase
2048 	 * because the bits start out as 11b and only bit 14 needs
2049 	 * to be cleared to 0b.
2050 	 */
2051 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2052 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2053 	if (ret_val) {
2054 		nvm->ops.release(hw);
2055 		goto out;
2056 	}
2057 
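	/* Clearing bit 14 turns the default 11b into the valid 10b signature. */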
2058 	data &= 0xBFFF;
2059 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2060 	                                               act_offset * 2 + 1,
2061 	                                               (u8)(data >> 8));
2062 	if (ret_val) {
2063 		nvm->ops.release(hw);
2064 		goto out;
2065 	}
2066 
2067 	/*
2068 	 * And invalidate the previously valid segment by writing 0
2069 	 * to the high byte of its signature word (0x13).  This can
2070 	 * be done without an erase because flash erase sets all
2071 	 * bits to 1's, so 1's can always be written down to 0's.
2072 	 */
2073 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2074 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2075 	if (ret_val) {
2076 		nvm->ops.release(hw);
2077 		goto out;
2078 	}
2079 
2080 	/* Great!  Everything worked, we can now clear the cached entries. */
2081 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2082 		dev_spec->shadow_ram[i].modified = FALSE;
2083 		dev_spec->shadow_ram[i].value = 0xFFFF;
2084 	}
2085 
2086 	nvm->ops.release(hw);
2087 
2088 	/*
2089 	 * Reload the EEPROM, or else modifications will not appear
2090 	 * until after the next adapter reset.
2091 	 */
2092 	nvm->ops.reload(hw);
2093 	msec_delay(10);
2094 
2095 out:
2096 	if (ret_val)
2097 		DEBUGOUT1("NVM update error: %d\n", ret_val);
2098 
2099 	return ret_val;
2100 }
2101 
2102 /**
2103  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2104  *  @hw: pointer to the HW structure
2105  *
2106  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2107  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
2108  *  If the bit is 0, the EEPROM has been modified but the checksum was not
2109  *  recalculated, in which case we calculate the checksum and set bit 6.
2110 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2111 {
2112 	s32 ret_val = E1000_SUCCESS;
2113 	u16 data;
2114 
2115 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2116 
2117 	/*
2118 	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
2119 	 * needs to be fixed.  This bit is an indication that the NVM
2120 	 * was prepared by OEM software and did not calculate the
2121 	 * checksum...a likely scenario.
2122 	 */
2123 	ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2124 	if (ret_val)
2125 		goto out;
2126 
2127 	if ((data & 0x40) == 0) {
2128 		data |= 0x40;
2129 		ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2130 		if (ret_val)
2131 			goto out;
2132 		ret_val = hw->nvm.ops.update(hw);
2133 		if (ret_val)
2134 			goto out;
2135 	}
2136 
2137 	ret_val = e1000_validate_nvm_checksum_generic(hw);
2138 
2139 out:
2140 	return ret_val;
2141 }
2142 
2143 /**
2144  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2145  *  @hw: pointer to the HW structure
2146  *  @offset: The offset (in bytes) of the byte/word to write.
2147  *  @size: Size of data to write, 1=byte 2=word
2148  *  @data: The byte(s) to write to the NVM.
2149  *
2150  *  Writes one/two bytes to the NVM using the flash access registers.
2151  **/
2152 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2153                                           u8 size, u16 data)
2154 {
2155 	union ich8_hws_flash_status hsfsts;
2156 	union ich8_hws_flash_ctrl hsflctl;
2157 	u32 flash_linear_addr;
2158 	u32 flash_data = 0;
2159 	s32 ret_val = -E1000_ERR_NVM;
2160 	u8 count = 0;
2161 
2162 	DEBUGFUNC("e1000_write_ich8_data");
2163 
2164 	if (size < 1 || size > 2 || data > size * 0xff ||
2165 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
2166 		goto out;
2167 
2168 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2169 	                    hw->nvm.flash_base_addr;
2170 
2171 	do {
2172 		usec_delay(1);
2173 		/* Steps */
2174 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2175 		if (ret_val != E1000_SUCCESS)
2176 			break;
2177 
2178 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2179 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2180 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2181 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2182 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2183 
2184 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2185 
2186 		if (size == 1)
2187 			flash_data = (u32)data & 0x00FF;
2188 		else
2189 			flash_data = (u32)data;
2190 
2191 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2192 
2193 		/*
2194 		 * If FCERR is set, clear it and retry the whole
2195 		 * sequence a few more times; otherwise we are done.
2196 		 */
2197 		ret_val = e1000_flash_cycle_ich8lan(hw,
2198 		                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2199 		if (ret_val == E1000_SUCCESS)
2200 			break;
2201 
2202 		/*
2203 		 * If we're here, then things are most likely
2204 		 * completely hosed, but if the error condition
2205 		 * is detected, it won't hurt to give it another
2206 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2207 		 */
2208 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2209 		if (hsfsts.hsf_status.flcerr == 1)
2210 			/* Repeat for some time before giving up. */
2211 			continue;
2212 		if (hsfsts.hsf_status.flcdone == 0) {
2213 			DEBUGOUT("Timeout error - flash cycle "
2214 				 "did not complete.");
2215 			break;
2216 		}
2217 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2218 
2219 out:
2220 	return ret_val;
2221 }
2222 
2223 /**
2224  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2225  *  @hw: pointer to the HW structure
2226  *  @offset: The index of the byte to write.
2227  *  @data: The byte to write to the NVM.
2228  *
2229  *  Writes a single byte to the NVM using the flash access registers.
2230  **/
2231 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2232                                           u8 data)
2233 {
2234 	u16 word = (u16)data;
2235 
2236 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2237 
2238 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2239 }
2240 
2241 /**
2242  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2243  *  @hw: pointer to the HW structure
2244  *  @offset: The offset of the byte to write.
2245  *  @byte: The byte to write to the NVM.
2246  *
2247  *  Writes a single byte to the NVM using the flash access registers.
2248  *  Goes through a retry algorithm before giving up.
2249  **/
2250 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2251                                                 u32 offset, u8 byte)
2252 {
2253 	s32 ret_val;
2254 	u16 program_retries;
2255 
2256 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2257 
2258 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2259 	if (ret_val == E1000_SUCCESS)
2260 		goto out;
2261 
2262 	for (program_retries = 0; program_retries < 100; program_retries++) {
2263 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2264 		usec_delay(100);
2265 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2266 		if (ret_val == E1000_SUCCESS)
2267 			break;
2268 	}
2269 	if (program_retries == 100) {
2270 		ret_val = -E1000_ERR_NVM;
2271 		goto out;
2272 	}
2273 
2274 out:
2275 	return ret_val;
2276 }
2277 
2278 /**
2279  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2280  *  @hw: pointer to the HW structure
2281  *  @bank: 0 for first bank, 1 for second bank, etc.
2282  *
2283  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2284  *  bank N is 4096 * N + flash_reg_addr.
2285  **/
2286 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2287 {
2288 	struct e1000_nvm_info *nvm = &hw->nvm;
2289 	union ich8_hws_flash_status hsfsts;
2290 	union ich8_hws_flash_ctrl hsflctl;
2291 	u32 flash_linear_addr;
2292 	/* bank size is in 16bit words - adjust to bytes */
2293 	u32 flash_bank_size = nvm->flash_bank_size * 2;
2294 	s32 ret_val = E1000_SUCCESS;
2295 	s32 count = 0;
2296 	s32 j, iteration, sector_size;
2297 
2298 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2299 
2300 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2301 
2302 	/*
2303 	 * Determine HW Sector size: Read BERASE bits of hw flash status
2304 	 * register
2305 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2306 	 *     consecutive sectors.  The start index for the nth Hw sector
2307 	 *     can be calculated as = bank * 4096 + n * 256
2308 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2309 	 *     The start index for the nth Hw sector can be calculated
2310 	 *     as = bank * 4096
2311 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2312 	 *     (ich9 only, otherwise error condition)
2313 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2314 	 */
2315 	switch (hsfsts.hsf_status.berasesz) {
2316 	case 0:
2317 		/* Hw sector size 256 */
2318 		sector_size = ICH_FLASH_SEG_SIZE_256;
2319 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2320 		break;
2321 	case 1:
2322 		sector_size = ICH_FLASH_SEG_SIZE_4K;
2323 		iteration = 1;
2324 		break;
2325 	case 2:
2326 		sector_size = ICH_FLASH_SEG_SIZE_8K;
2327 		iteration = 1;
2328 		break;
2329 	case 3:
2330 		sector_size = ICH_FLASH_SEG_SIZE_64K;
2331 		iteration = 1;
2332 		break;
2333 	default:
2334 		ret_val = -E1000_ERR_NVM;
2335 		goto out;
2336 	}
2337 
2338 	/* Start with the base address, then add the sector offset. */
2339 	flash_linear_addr = hw->nvm.flash_base_addr;
2340 	flash_linear_addr += (bank) ? flash_bank_size : 0;
2341 
2342 	for (j = 0; j < iteration ; j++) {
2343 		do {
2344 			/* Steps */
2345 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
2346 			if (ret_val)
2347 				goto out;
2348 
2349 			/*
2350 			 * Write a value 11 (block Erase) in Flash
2351 			 * Cycle field in hw flash control
2352 			 */
2353 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
2354 			                                      ICH_FLASH_HSFCTL);
2355 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
2356 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
2357 			                        hsflctl.regval);
2358 
2359 			/*
2360 			 * Write the last 24 bits of an index within the
2361 			 * block into Flash Linear address field in Flash
2362 			 * Address.
2363 			 */
2364 			flash_linear_addr += (j * sector_size);
2365 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
2366 			                      flash_linear_addr);
2367 
2368 			ret_val = e1000_flash_cycle_ich8lan(hw,
2369 			                       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
2370 			if (ret_val == E1000_SUCCESS)
2371 				break;
2372 
2373 			/*
2374 			 * Check if FCERR is set to 1.  If 1,
2375 			 * clear it and try the whole sequence
2376 			 * a few more times else Done
2377 			 */
2378 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2379 						      ICH_FLASH_HSFSTS);
2380 			if (hsfsts.hsf_status.flcerr == 1)
2381 				/* repeat for some time before giving up */
2382 				continue;
2383 			else if (hsfsts.hsf_status.flcdone == 0)
2384 				goto out;
2385 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2386 	}
2387 
2388 out:
2389 	return ret_val;
2390 }
2391 
2392 /**
2393  *  e1000_valid_led_default_ich8lan - Set the default LED settings
2394  *  @hw: pointer to the HW structure
2395  *  @data: Pointer to the LED settings
2396  *
2397  *  Reads the LED default settings from the NVM into data.  If the NVM LED
2398  *  setting is all 0's or all F's, set the LED default to a valid LED
2399  *  default setting.
2400  **/
2401 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
2402 {
2403 	s32 ret_val;
2404 
2405 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
2406 
2407 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
2408 	if (ret_val) {
2409 		DEBUGOUT("NVM Read Error\n");
2410 		goto out;
2411 	}
2412 
2413 	if (*data == ID_LED_RESERVED_0000 ||
2414 	    *data == ID_LED_RESERVED_FFFF)
2415 		*data = ID_LED_DEFAULT_ICH8LAN;
2416 
2417 out:
2418 	return ret_val;
2419 }
2420 
2421 /**
2422  *  e1000_id_led_init_pchlan - store LED configurations
2423  *  @hw: pointer to the HW structure
2424  *
2425  *  PCH does not control LEDs via the LEDCTL register, rather it uses
2426  *  the PHY LED configuration register.
2427  *
2428  *  PCH also does not have an "always on" or "always off" mode which
2429  *  complicates the ID feature.  Instead of using the "on" mode to indicate
2430  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
2431  *  use "link_up" mode.  The LEDs will still ID on request if there is no
2432  *  link based on logic in e1000_led_[on|off]_pchlan().
2433  **/
2434 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
2435 {
2436 	struct e1000_mac_info *mac = &hw->mac;
2437 	s32 ret_val;
2438 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
2439 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
2440 	u16 data, i, temp, shift;
2441 
2442 	DEBUGFUNC("e1000_id_led_init_pchlan");
2443 
2444 	/* Get default ID LED modes */
2445 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
2446 	if (ret_val)
2447 		goto out;
2448 
2449 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
2450 	mac->ledctl_mode1 = mac->ledctl_default;
2451 	mac->ledctl_mode2 = mac->ledctl_default;
2452 
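	/*
	 * Each LED has a 4-bit default mode in the NVM word and a 5-bit
	 * field in the PHY LED configuration value built below.
	 */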
2453 	for (i = 0; i < 4; i++) {
2454 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
2455 		shift = (i * 5);
2456 		switch (temp) {
2457 		case ID_LED_ON1_DEF2:
2458 		case ID_LED_ON1_ON2:
2459 		case ID_LED_ON1_OFF2:
2460 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2461 			mac->ledctl_mode1 |= (ledctl_on << shift);
2462 			break;
2463 		case ID_LED_OFF1_DEF2:
2464 		case ID_LED_OFF1_ON2:
2465 		case ID_LED_OFF1_OFF2:
2466 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2467 			mac->ledctl_mode1 |= (ledctl_off << shift);
2468 			break;
2469 		default:
2470 			/* Do nothing */
2471 			break;
2472 		}
2473 		switch (temp) {
2474 		case ID_LED_DEF1_ON2:
2475 		case ID_LED_ON1_ON2:
2476 		case ID_LED_OFF1_ON2:
2477 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
2478 			mac->ledctl_mode2 |= (ledctl_on << shift);
2479 			break;
2480 		case ID_LED_DEF1_OFF2:
2481 		case ID_LED_ON1_OFF2:
2482 		case ID_LED_OFF1_OFF2:
2483 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
2484 			mac->ledctl_mode2 |= (ledctl_off << shift);
2485 			break;
2486 		default:
2487 			/* Do nothing */
2488 			break;
2489 		}
2490 	}
2491 
2492 out:
2493 	return ret_val;
2494 }
2495 
2496 /**
2497  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
2498  *  @hw: pointer to the HW structure
2499  *
2500  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
2501  *  register, so the bus width is hard coded.
2502  **/
2503 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
2504 {
2505 	struct e1000_bus_info *bus = &hw->bus;
2506 	s32 ret_val;
2507 
2508 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
2509 
2510 	ret_val = e1000_get_bus_info_pcie_generic(hw);
2511 
2512 	/*
2513 	 * ICH devices are "PCI Express"-ish.  They have
2514 	 * a configuration space, but do not contain
2515 	 * PCI Express Capability registers, so bus width
2516 	 * must be hardcoded.
2517 	 */
2518 	if (bus->width == e1000_bus_width_unknown)
2519 		bus->width = e1000_bus_width_pcie_x1;
2520 
2521 	return ret_val;
2522 }
2523 
2524 /**
2525  *  e1000_reset_hw_ich8lan - Reset the hardware
2526  *  @hw: pointer to the HW structure
2527  *
2528  *  Does a full reset of the hardware which includes a reset of the PHY and
2529  *  MAC.
2530  **/
2531 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2532 {
2533 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2534 	u16 reg;
2535 	u32 ctrl, icr, kab;
2536 	s32 ret_val;
2537 
2538 	DEBUGFUNC("e1000_reset_hw_ich8lan");
2539 
2540 	/*
2541 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
2542 	 * on the last TLP read/write transaction when MAC is reset.
2543 	 */
2544 	ret_val = e1000_disable_pcie_master_generic(hw);
2545 	if (ret_val)
2546 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
2547 
2548 	DEBUGOUT("Masking off all interrupts\n");
2549 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2550 
2551 	/*
2552 	 * Disable the Transmit and Receive units.  Then delay to allow
2553 	 * any pending transactions to complete before we hit the MAC
2554 	 * with the global reset.
2555 	 */
2556 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
2557 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
2558 	E1000_WRITE_FLUSH(hw);
2559 
2560 	msec_delay(10);
2561 
2562 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
2563 	if (hw->mac.type == e1000_ich8lan) {
2564 		/* Set Tx and Rx buffer allocation to 8k apiece. */
2565 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
2566 		/* Set Packet Buffer Size to 16k. */
2567 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
2568 	}
2569 
2570 	if (hw->mac.type == e1000_pchlan) {
2571 		/* Save the NVM K1 bit setting*/
2572 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
2573 		if (ret_val)
2574 			return ret_val;
2575 
2576 		if (reg & E1000_NVM_K1_ENABLE)
2577 			dev_spec->nvm_k1_enabled = TRUE;
2578 		else
2579 			dev_spec->nvm_k1_enabled = FALSE;
2580 	}
2581 
2582 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
2583 
2584 	if (!hw->phy.ops.check_reset_block(hw) && !hw->phy.reset_disable) {
2585 		/* Clear PHY Reset Asserted bit */
2586 		if (hw->mac.type >= e1000_pchlan) {
2587 			u32 status = E1000_READ_REG(hw, E1000_STATUS);
2588 			E1000_WRITE_REG(hw, E1000_STATUS, status &
2589 			                ~E1000_STATUS_PHYRA);
2590 		}
2591 
2592 		/*
2593 		 * PHY HW reset requires MAC CORE reset at the same
2594 		 * time to make sure the interface between MAC and the
2595 		 * external PHY is reset.
2596 		 */
2597 		ctrl |= E1000_CTRL_PHY_RST;
2598 	}
2599 	ret_val = e1000_acquire_swflag_ich8lan(hw);
2600 	DEBUGOUT("Issuing a global reset to ich8lan\n");
2601 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
2602 	msec_delay(20);
2603 
2604 	if (!ret_val)
2605 		e1000_release_swflag_ich8lan(hw);
2606 
2607 	if (ctrl & E1000_CTRL_PHY_RST)
2608 		ret_val = hw->phy.ops.get_cfg_done(hw);
2609 
2610 	if (hw->mac.type >= e1000_ich10lan) {
2611 		e1000_lan_init_done_ich8lan(hw);
2612 	} else {
2613 		ret_val = e1000_get_auto_rd_done_generic(hw);
2614 		if (ret_val) {
2615 			/*
2616 			 * When the auto config read does not complete, do not
2617 			 * return an error.  This can happen when there is no
2618 			 * EEPROM, and returning an error would prevent getting link.
2619 			 */
2620 			DEBUGOUT("Auto Read Done did not complete\n");
2621 		}
2622 	}
2623 	/* Dummy read to clear the phy wakeup bit after lcd reset */
2624 	if (hw->mac.type == e1000_pchlan)
2625 		hw->phy.ops.read_reg(hw, BM_WUC, &reg);
2626 
2627 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2628 	if (ret_val)
2629 		goto out;
2630 
2631 	if (hw->mac.type == e1000_pchlan) {
2632 		ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
2633 		if (ret_val)
2634 			goto out;
2635 	}
2636 	/*
2637 	 * For PCH, this write will make sure that any noise
2638 	 * will be detected as a CRC error and be dropped rather than show up
2639 	 * as a bad packet to the DMA engine.
2640 	 */
2641 	if (hw->mac.type == e1000_pchlan)
2642 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
2643 
2644 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2645 	icr = E1000_READ_REG(hw, E1000_ICR);
2646 
2647 	kab = E1000_READ_REG(hw, E1000_KABGTXD);
2648 	kab |= E1000_KABGTXD_BGSQLBIAS;
2649 	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
2650 
2651 	if (hw->mac.type == e1000_pchlan)
2652 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2653 
2654 out:
2655 	return ret_val;
2656 }
2657 
2658 /**
2659  *  e1000_init_hw_ich8lan - Initialize the hardware
2660  *  @hw: pointer to the HW structure
2661  *
2662  *  Prepares the hardware for transmit and receive by doing the following:
2663  *   - initialize hardware bits
2664  *   - initialize LED identification
2665  *   - setup receive address registers
2666  *   - setup flow control
2667  *   - setup transmit descriptors
2668  *   - clear statistics
2669  **/
2670 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
2671 {
2672 	struct e1000_mac_info *mac = &hw->mac;
2673 	u32 ctrl_ext, txdctl, snoop;
2674 	s32 ret_val;
2675 	u16 i;
2676 
2677 	DEBUGFUNC("e1000_init_hw_ich8lan");
2678 
2679 	e1000_initialize_hw_bits_ich8lan(hw);
2680 
2681 	/* Initialize identification LED */
2682 	ret_val = mac->ops.id_led_init(hw);
2683 	if (ret_val)
2684 		DEBUGOUT("Error initializing identification LED\n");
2685 		/* This is not fatal and we should not stop init due to this */
2686 
2687 	/* Setup the receive address. */
2688 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
2689 
2690 	/* Zero out the Multicast HASH table */
2691 	DEBUGOUT("Zeroing the MTA\n");
2692 	for (i = 0; i < mac->mta_reg_count; i++)
2693 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
2694 
2695 	/*
2696 	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
2697 	 * the ME.  Reading the BM_WUC register will clear the host wakeup bit.
2698 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
2699 	 */
2700 	if (hw->phy.type == e1000_phy_82578) {
2701 		hw->phy.ops.read_reg(hw, BM_WUC, &i);
2702 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
2703 		if (ret_val)
2704 			return ret_val;
2705 	}
2706 
2707 	/* Setup link and flow control */
2708 	ret_val = mac->ops.setup_link(hw);
2709 
2710 	/* Set the transmit descriptor write-back policy for both queues */
2711 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
2712 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
2713 		 E1000_TXDCTL_FULL_TX_DESC_WB;
2714 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
2715 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
2716 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
2717 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
2718 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
2719 		 E1000_TXDCTL_FULL_TX_DESC_WB;
2720 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
2721 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
2722 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
2723 
2724 	/*
2725 	 * ICH8 has opposite polarity of no_snoop bits.
2726 	 * By default, we should use snoop behavior.
2727 	 */
2728 	if (mac->type == e1000_ich8lan)
2729 		snoop = PCIE_ICH8_SNOOP_ALL;
2730 	else
2731 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
2732 	e1000_set_pcie_no_snoop_generic(hw, snoop);
2733 
2734 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2735 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
2736 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2737 
2738 	/*
2739 	 * Clear all of the statistics registers (clear on read).  It is
2740 	 * important that we do this after we have tried to establish link
2741 	 * because the symbol error count will increment wildly if there
2742 	 * is no link.
2743 	 */
2744 	e1000_clear_hw_cntrs_ich8lan(hw);
2745 
2746 	return ret_val;
2747 }
2748 /**
2749  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
2750  *  @hw: pointer to the HW structure
2751  *
2752  *  Sets/Clears required hardware bits necessary for correctly setting up the
2753  *  hardware for transmit and receive.
2754  **/
2755 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
2756 {
2757 	u32 reg;
2758 
2759 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
2760 
2761 	/* Extended Device Control */
2762 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2763 	reg |= (1 << 22);
2764 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
2765 	if (hw->mac.type >= e1000_pchlan)
2766 		reg |= E1000_CTRL_EXT_PHYPDEN;
2767 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2768 
2769 	/* Transmit Descriptor Control 0 */
2770 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
2771 	reg |= (1 << 22);
2772 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
2773 
2774 	/* Transmit Descriptor Control 1 */
2775 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
2776 	reg |= (1 << 22);
2777 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
2778 
2779 	/* Transmit Arbitration Control 0 */
2780 	reg = E1000_READ_REG(hw, E1000_TARC(0));
2781 	if (hw->mac.type == e1000_ich8lan)
2782 		reg |= (1 << 28) | (1 << 29);
2783 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
2784 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
2785 
2786 	/* Transmit Arbitration Control 1 */
2787 	reg = E1000_READ_REG(hw, E1000_TARC(1));
2788 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
2789 		reg &= ~(1 << 28);
2790 	else
2791 		reg |= (1 << 28);
2792 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
2793 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
2794 
2795 	/* Device Status */
2796 	if (hw->mac.type == e1000_ich8lan) {
2797 		reg = E1000_READ_REG(hw, E1000_STATUS);
2798 		reg &= ~(1 << 31);
2799 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
2800 	}
2801 
2802 	return;
2803 }
2804 
2805 /**
2806  *  e1000_setup_link_ich8lan - Setup flow control and link settings
2807  *  @hw: pointer to the HW structure
2808  *
2809  *  Determines which flow control settings to use, then configures flow
2810  *  control.  Calls the appropriate media-specific link configuration
2811  *  function.  Assuming the adapter has a valid link partner, a valid link
2812  *  should be established.  Assumes the hardware has previously been reset
2813  *  and the transmitter and receiver are not enabled.
2814  **/
2815 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2816 {
2817 	s32 ret_val = E1000_SUCCESS;
2818 
2819 	DEBUGFUNC("e1000_setup_link_ich8lan");
2820 
2821 	if (hw->phy.ops.check_reset_block(hw))
2822 		goto out;
2823 
2824 	/*
2825 	 * ICH parts do not have a word in the NVM to determine
2826 	 * the default flow control setting, so we explicitly
2827 	 * set it to full.
2828 	 */
2829 	if (hw->fc.requested_mode == e1000_fc_default)
2830 		hw->fc.requested_mode = e1000_fc_full;
2831 
2832 	/*
2833 	 * Save off the requested flow control mode for use later.  Depending
2834 	 * on the link partner's capabilities, we may or may not use this mode.
2835 	 */
2836 	hw->fc.current_mode = hw->fc.requested_mode;
2837 
2838 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
2839 		hw->fc.current_mode);
2840 
2841 	/* Continue to configure the copper link. */
2842 	ret_val = hw->mac.ops.setup_physical_interface(hw);
2843 	if (ret_val)
2844 		goto out;
2845 
2846 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
2847 	if ((hw->phy.type == e1000_phy_82578) ||
2848 	    (hw->phy.type == e1000_phy_82577)) {
2849 		ret_val = hw->phy.ops.write_reg(hw,
2850 		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
2851 		                             hw->fc.pause_time);
2852 		if (ret_val)
2853 			goto out;
2854 	}
2855 
2856 	ret_val = e1000_set_fc_watermarks_generic(hw);
2857 
2858 out:
2859 	return ret_val;
2860 }
2861 
2862 /**
2863  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
2864  *  @hw: pointer to the HW structure
2865  *
2866  *  Configures the kumeran interface to the PHY to wait the appropriate time
2867  *  when polling the PHY, then calls the generic setup_copper_link to finish
2868  *  configuring the copper link.
2869  **/
2870 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
2871 {
2872 	u32 ctrl;
2873 	s32 ret_val;
2874 	u16 reg_data;
2875 
2876 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
2877 
2878 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
2879 	ctrl |= E1000_CTRL_SLU;
2880 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
2881 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
2882 
2883 	/*
2884 	 * Set the mac to wait the maximum time between each iteration
2885 	 * and increase the max iterations when polling the phy;
2886 	 * this fixes erroneous timeouts at 10Mbps.
2887 	 */
2888 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
2889 	                                       0xFFFF);
2890 	if (ret_val)
2891 		goto out;
2892 	ret_val = e1000_read_kmrn_reg_generic(hw,
2893 	                                      E1000_KMRNCTRLSTA_INBAND_PARAM,
2894 	                                      &reg_data);
2895 	if (ret_val)
2896 		goto out;
2897 	reg_data |= 0x3F;
2898 	ret_val = e1000_write_kmrn_reg_generic(hw,
2899 	                                       E1000_KMRNCTRLSTA_INBAND_PARAM,
2900 	                                       reg_data);
2901 	if (ret_val)
2902 		goto out;
2903 
2904 	switch (hw->phy.type) {
2905 	case e1000_phy_igp_3:
2906 		ret_val = e1000_copper_link_setup_igp(hw);
2907 		if (ret_val)
2908 			goto out;
2909 		break;
2910 	case e1000_phy_bm:
2911 	case e1000_phy_82578:
2912 		ret_val = e1000_copper_link_setup_m88(hw);
2913 		if (ret_val)
2914 			goto out;
2915 		break;
2916 	case e1000_phy_82577:
2917 		ret_val = e1000_copper_link_setup_82577(hw);
2918 		if (ret_val)
2919 			goto out;
2920 		break;
2921 	case e1000_phy_ife:
2922 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
2923 		                               &reg_data);
2924 		if (ret_val)
2925 			goto out;
2926 
2927 		reg_data &= ~IFE_PMC_AUTO_MDIX;
2928 
2929 		switch (hw->phy.mdix) {
2930 		case 1:
2931 			reg_data &= ~IFE_PMC_FORCE_MDIX;
2932 			break;
2933 		case 2:
2934 			reg_data |= IFE_PMC_FORCE_MDIX;
2935 			break;
2936 		case 0:
2937 		default:
2938 			reg_data |= IFE_PMC_AUTO_MDIX;
2939 			break;
2940 		}
2941 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
2942 		                                reg_data);
2943 		if (ret_val)
2944 			goto out;
2945 		break;
2946 	default:
2947 		break;
2948 	}
2949 	ret_val = e1000_setup_copper_link_generic(hw);
2950 
2951 out:
2952 	return ret_val;
2953 }
2954 
2955 /**
2956  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
2957  *  @hw: pointer to the HW structure
2958  *  @speed: pointer to store current link speed
2959  *  @duplex: pointer to store the current link duplex
2960  *
2961  *  Calls the generic get_speed_and_duplex to retrieve the current link
2962  *  information and then calls the Kumeran lock loss workaround for links at
2963  *  gigabit speeds.
2964  **/
2965 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
2966                                           u16 *duplex)
2967 {
2968 	s32 ret_val;
2969 
2970 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
2971 
2972 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
2973 	if (ret_val)
2974 		goto out;
2975 
2976 	if ((hw->mac.type == e1000_ich8lan) &&
2977 	    (hw->phy.type == e1000_phy_igp_3) &&
2978 	    (*speed == SPEED_1000)) {
2979 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
2980 	}
2981 
2982 out:
2983 	return ret_val;
2984 }
2985 
2986 /**
2987  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
2988  *  @hw: pointer to the HW structure
2989  *
2990  *  Work-around for 82566 Kumeran PCS lock loss:
2991  *  On link status change (i.e. PCI reset, speed change) and link is up and
2992  *  speed is gigabit-
2993  *    0) if workaround is optionally disabled do nothing
2994  *    1) wait 1ms for Kumeran link to come up
2995  *    2) check Kumeran Diagnostic register PCS lock loss bit
2996  *    3) if not set the link is locked (all is good), otherwise...
2997  *    4) reset the PHY
2998  *    5) repeat up to 10 times
2999  *  Note: this is only called for IGP3 copper when speed is 1gb.
3000  **/
3001 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3002 {
3003 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3004 	u32 phy_ctrl;
3005 	s32 ret_val = E1000_SUCCESS;
3006 	u16 i, data;
3007 	bool link;
3008 
3009 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3010 
3011 	if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
3012 		goto out;
3013 
3014 	/*
3015 	 * Make sure link is up before proceeding.  If not, just return;
3016 	 * attempting this while the link is negotiating has been seen to
3017 	 * foul up link stability.
3018 	 */
3019 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3020 	if (!link) {
3021 		ret_val = E1000_SUCCESS;
3022 		goto out;
3023 	}
3024 
3025 	for (i = 0; i < 10; i++) {
3026 		/* read once to clear */
3027 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3028 		if (ret_val)
3029 			goto out;
3030 		/* and again to get new status */
3031 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3032 		if (ret_val)
3033 			goto out;
3034 
3035 		/* check for PCS lock */
3036 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3037 			ret_val = E1000_SUCCESS;
3038 			goto out;
3039 		}
3040 
3041 		/* Issue PHY reset */
3042 		hw->phy.ops.reset(hw);
3043 		msec_delay_irq(5);
3044 	}
3045 	/* Disable GigE link negotiation */
3046 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3047 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3048 	             E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3049 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3050 
3051 	/*
3052 	 * Call gig speed drop workaround on Gig disable before accessing
3053 	 * any PHY registers
3054 	 */
3055 	e1000_gig_downshift_workaround_ich8lan(hw);
3056 
3057 	/* unable to acquire PCS lock */
3058 	ret_val = -E1000_ERR_PHY;
3059 
3060 out:
3061 	return ret_val;
3062 }
3063 
3064 /**
3065  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3066  *  @hw: pointer to the HW structure
3067  *  @state: boolean value used to set the current Kumeran workaround state
3068  *
3069  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
3070  *  /disabled - FALSE).
3071  **/
3072 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3073                                                  bool state)
3074 {
3075 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3076 
3077 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3078 
3079 	if (hw->mac.type != e1000_ich8lan) {
3080 		DEBUGOUT("Workaround applies to ICH8 only.\n");
3081 		return;
3082 	}
3083 
3084 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3085 
3086 	return;
3087 }
3088 
3089 /**
3090  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3091  *  @hw: pointer to the HW structure
3092  *
3093  *  Workaround for 82566 power-down on D3 entry:
3094  *    1) disable gigabit link
3095  *    2) write VR power-down enable
3096  *    3) read it back
3097  *  Continue if successful, else issue LCD reset and repeat
3098  **/
3099 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3100 {
3101 	u32 reg;
3102 	u16 data;
3103 	u8  retry = 0;
3104 
3105 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3106 
3107 	if (hw->phy.type != e1000_phy_igp_3)
3108 		goto out;
3109 
3110 	/* Try the workaround twice (if needed) */
3111 	do {
3112 		/* Disable link */
3113 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3114 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3115 		        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3116 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3117 
3118 		/*
3119 		 * Call gig speed drop workaround on Gig disable before
3120 		 * accessing any PHY registers
3121 		 */
3122 		if (hw->mac.type == e1000_ich8lan)
3123 			e1000_gig_downshift_workaround_ich8lan(hw);
3124 
3125 		/* Write VR power-down enable */
3126 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3127 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3128 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3129 		                   data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3130 
3131 		/* Read it back and test */
3132 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3133 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3134 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3135 			break;
3136 
3137 		/* Issue PHY reset and repeat at most one more time */
3138 		reg = E1000_READ_REG(hw, E1000_CTRL);
3139 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3140 		retry++;
3141 	} while (retry);
3142 
3143 out:
3144 	return;
3145 }
3146 
3147 /**
3148  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3149  *  @hw: pointer to the HW structure
3150  *
3151  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3152  *  LPLU, Gig disable, MDIC PHY reset):
3153  *    1) Set Kumeran Near-end loopback
3154  *    2) Clear Kumeran Near-end loopback
3155  *  Should only be called for ICH8[m] devices with IGP_3 Phy.
3156  **/
3157 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3158 {
3159 	s32 ret_val = E1000_SUCCESS;
3160 	u16 reg_data;
3161 
3162 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3163 
3164 	if ((hw->mac.type != e1000_ich8lan) ||
3165 	    (hw->phy.type != e1000_phy_igp_3))
3166 		goto out;
3167 
3168 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3169 	                                      &reg_data);
3170 	if (ret_val)
3171 		goto out;
3172 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3173 	ret_val = e1000_write_kmrn_reg_generic(hw,
3174 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3175 	                                       reg_data);
3176 	if (ret_val)
3177 		goto out;
3178 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3179 	ret_val = e1000_write_kmrn_reg_generic(hw,
3180 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3181 	                                       reg_data);
3182 out:
3183 	return;
3184 }
3185 
3186 /**
3187  *  e1000_disable_gig_wol_ich8lan - disable gig during WoL
3188  *  @hw: pointer to the HW structure
3189  *
3190  *  During S0 to Sx transition, it is possible the link remains at gig
3191  *  instead of negotiating to a lower speed.  Before going to Sx, set
3192  *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3193  *  to a lower speed.
3194  *
3195  *  Should only be called for applicable parts.
3196  **/
3197 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3198 {
3199 	u32 phy_ctrl;
3200 
3201 	switch (hw->mac.type) {
3202 	case e1000_ich8lan:
3203 	case e1000_ich9lan:
3204 	case e1000_ich10lan:
3205 	case e1000_pchlan:
3206 		phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3207 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
3208 		            E1000_PHY_CTRL_GBE_DISABLE;
3209 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3210 
3211 		if (hw->mac.type == e1000_pchlan)
3212 			e1000_phy_hw_reset_ich8lan(hw);
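		/* fall-thru */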
3213 	default:
3214 		break;
3215 	}
3216 
3217 	return;
3218 }
3219 
3220 /**
3221  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
3222  *  @hw: pointer to the HW structure
3223  *
3224  *  Return the LED back to the default configuration.
3225  **/
3226 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3227 {
3228 	s32 ret_val = E1000_SUCCESS;
3229 
3230 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
3231 
3232 	if (hw->phy.type == e1000_phy_ife)
3233 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3234 		                              0);
3235 	else
3236 		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3237 
3238 	return ret_val;
3239 }
3240 
3241 /**
3242  *  e1000_led_on_ich8lan - Turn LEDs on
3243  *  @hw: pointer to the HW structure
3244  *
3245  *  Turn on the LEDs.
3246  **/
3247 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3248 {
3249 	s32 ret_val = E1000_SUCCESS;
3250 
3251 	DEBUGFUNC("e1000_led_on_ich8lan");
3252 
3253 	if (hw->phy.type == e1000_phy_ife)
3254 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3255 		                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3256 	else
3257 		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3258 
3259 	return ret_val;
3260 }
3261 
3262 /**
3263  *  e1000_led_off_ich8lan - Turn LEDs off
3264  *  @hw: pointer to the HW structure
3265  *
3266  *  Turn off the LEDs.
3267  **/
3268 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3269 {
3270 	s32 ret_val = E1000_SUCCESS;
3271 
3272 	DEBUGFUNC("e1000_led_off_ich8lan");
3273 
3274 	if (hw->phy.type == e1000_phy_ife)
3275 		ret_val = hw->phy.ops.write_reg(hw,
3276 		               IFE_PHY_SPECIAL_CONTROL_LED,
3277 		               (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3278 	else
3279 		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3280 
3281 	return ret_val;
3282 }
3283 
3284 /**
3285  *  e1000_setup_led_pchlan - Configures SW controllable LED
3286  *  @hw: pointer to the HW structure
3287  *
3288  *  This prepares the SW controllable LED for use.
3289  **/
3290 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3291 {
3292 	DEBUGFUNC("e1000_setup_led_pchlan");
3293 
3294 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3295 					(u16)hw->mac.ledctl_mode1);
3296 }
3297 
3298 /**
3299  *  e1000_cleanup_led_pchlan - Restore the default LED operation
3300  *  @hw: pointer to the HW structure
3301  *
3302  *  Return the LED back to the default configuration.
3303  **/
3304 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3305 {
3306 	DEBUGFUNC("e1000_cleanup_led_pchlan");
3307 
3308 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3309 					(u16)hw->mac.ledctl_default);
3310 }
3311 
3312 /**
3313  *  e1000_led_on_pchlan - Turn LEDs on
3314  *  @hw: pointer to the HW structure
3315  *
3316  *  Turn on the LEDs.
3317  **/
3318 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3319 {
3320 	u16 data = (u16)hw->mac.ledctl_mode2;
3321 	u32 i, led;
3322 
3323 	DEBUGFUNC("e1000_led_on_pchlan");
3324 
3325 	/*
3326 	 * If no link, then turn LED on by setting the invert bit
3327 	 * for each LED whose mode is "link_up" in ledctl_mode2.
3328 	 */
3329 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3330 		for (i = 0; i < 3; i++) {
3331 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3332 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
3333 			    E1000_LEDCTL_MODE_LINK_UP)
3334 				continue;
3335 			if (led & E1000_PHY_LED0_IVRT)
3336 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3337 			else
3338 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
3339 		}
3340 	}
3341 
3342 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3343 }
3344 
3345 /**
3346  *  e1000_led_off_pchlan - Turn LEDs off
3347  *  @hw: pointer to the HW structure
3348  *
3349  *  Turn off the LEDs.
3350  **/
3351 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3352 {
3353 	u16 data = (u16)hw->mac.ledctl_mode1;
3354 	u32 i, led;
3355 
3356 	DEBUGFUNC("e1000_led_off_pchlan");
3357 
3358 	/*
3359 	 * If no link, then turn LED off by clearing the invert bit
3360 	 * for each LED whose mode is "link_up" in ledctl_mode1.
3361 	 */
3362 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3363 		for (i = 0; i < 3; i++) {
3364 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3365 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
3366 			    E1000_LEDCTL_MODE_LINK_UP)
3367 				continue;
3368 			if (led & E1000_PHY_LED0_IVRT)
3369 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3370 			else
3371 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
3372 		}
3373 	}
3374 
3375 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3376 }
3377 
3378 /**
3379  *  e1000_get_cfg_done_ich8lan - Read config done bit
3380  *  @hw: pointer to the HW structure
3381  *
3382  *  Read the management control register for the config done bit for
3383  *  completion status.  NOTE: EEPROM-less silicon will fail when trying to
3384  *  read the config done bit, so the error is *ONLY* logged and the routine
3385  *  still returns E1000_SUCCESS.  If an error were returned, EEPROM-less
3386  *  silicon would not be able to be reset or to change link.
3387  **/
3388 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3389 {
3390 	s32 ret_val = E1000_SUCCESS;
3391 	u32 bank = 0;
3392 
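	/*
	 * On PCH parts, acknowledge the PHY Reset Asserted (PHYRA) indication
	 * by clearing it from STATUS; if it has not been set yet, the PHY
	 * reset has not completed and more settling time may be required
	 * (see the debug message below).
	 */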
3393 	if (hw->mac.type >= e1000_pchlan) {
3394 		u32 status = E1000_READ_REG(hw, E1000_STATUS);
3395 
3396 		if (status & E1000_STATUS_PHYRA)
3397 			E1000_WRITE_REG(hw, E1000_STATUS, status &
3398 			                ~E1000_STATUS_PHYRA);
3399 		else
3400 			DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
3401 	}
3402 
3403 	e1000_get_cfg_done_generic(hw);
3404 
3405 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
3406 	if (hw->mac.type <= e1000_ich9lan) {
3407 		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
3408 		    (hw->phy.type == e1000_phy_igp_3)) {
3409 			e1000_phy_init_script_igp3(hw);
3410 		}
3411 	} else {
3412 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
3413 			/* Maybe we should do a basic PHY config */
3414 			DEBUGOUT("EEPROM not present\n");
3415 			ret_val = -E1000_ERR_CONFIG;
3416 		}
3417 	}
3418 
3419 	return ret_val;
3420 }
3421 
3422 /**
3423  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
3424  * @hw: pointer to the HW structure
3425  *
3426  * When the PHY is being powered down to save power, to drop the link during
3427  * a driver unload, or because Wake on LAN is not enabled, remove the link.
3428  **/
3429 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
3430 {
3431 	/* Only power down if manageability and the reset block are both off */
3432 	if (!(hw->mac.ops.check_mng_mode(hw) ||
3433 	      hw->phy.ops.check_reset_block(hw)))
3434 		e1000_power_down_phy_copper(hw);
3435 
3436 	return;
3437 }
3438 
3439 /**
3440  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
3441  *  @hw: pointer to the HW structure
3442  *
3443  *  Clears hardware counters specific to the silicon family and calls
3444  *  clear_hw_cntrs_generic to clear all general purpose counters.
3445  **/
3446 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3447 {
3448 	u16 phy_data;
3449 
3450 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
3451 
3452 	e1000_clear_hw_cntrs_base_generic(hw);
3453 
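	/* These MAC statistics registers are clear-on-read */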
3454 	E1000_READ_REG(hw, E1000_ALGNERRC);
3455 	E1000_READ_REG(hw, E1000_RXERRC);
3456 	E1000_READ_REG(hw, E1000_TNCRS);
3457 	E1000_READ_REG(hw, E1000_CEXTERR);
3458 	E1000_READ_REG(hw, E1000_TSCTC);
3459 	E1000_READ_REG(hw, E1000_TSCTFC);
3460 
3461 	E1000_READ_REG(hw, E1000_MGTPRC);
3462 	E1000_READ_REG(hw, E1000_MGTPDC);
3463 	E1000_READ_REG(hw, E1000_MGTPTC);
3464 
3465 	E1000_READ_REG(hw, E1000_IAC);
3466 	E1000_READ_REG(hw, E1000_ICRXOC);
3467 
3468 	/* Clear PHY statistics registers */
3469 	if ((hw->phy.type == e1000_phy_82578) ||
3470 	    (hw->phy.type == e1000_phy_82577)) {
3471 		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
3472 		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
3473 		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
3474 		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
3475 		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
3476 		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
3477 		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
3478 		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
3479 		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
3480 		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
3481 		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
3482 		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
3483 		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
3484 		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
3485 	}
3486 }
3487 
3488