/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 */

#include "e1000_api.h"

static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					    u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					   u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					  u32 offset, u8 *data);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 *data);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					  u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						 u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access Error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 Flash Cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 7:0 GbE region Read Access */
		u32 grwa:8; /* 15:8 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u32 regval;
};
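
/* Illustrative sketch (not part of the driver flow): these unions let the
 * flash code read a hardware register once and then operate on named bit
 * fields through the overlaying regval member.  A hypothetical wait-for-idle
 * loop over HSFSTS, assuming the E1000_READ_FLASH_REG16()/ICH_FLASH_HSFSTS
 * definitions used by the flash helpers in this file, might look like:
 *
 *	union ich8_hws_flash_status hsfsts;
 *	s32 i = 0;
 *
 *	do {
 *		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *		if (!hsfsts.hsf_status.flcinprog)
 *			break;	(no flash cycle is in progress)
 *		usec_delay(1);
 *	} while (i++ < ICH_FLASH_READ_COMMAND_TIMEOUT);
 */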

/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with the known
 *  ID, otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return FALSE;
out:
	if (hw->mac.type == e1000_pch_lpt) {
		/* Unforce SMBus mode in PHY */
		hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
		hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

		/* Unforce SMBus mode in MAC */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
	}

	return TRUE;
}
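
/* Worked example (illustrative, with hypothetical register values): if
 * PHY_ID1 reads 0x0154 and PHY_ID2 reads 0xA7B2, the loop above assembles
 * phy_id = (0x0154 << 16) | (0xA7B2 & PHY_REVISION_MASK) = 0x0154A7B0,
 * assuming PHY_REVISION_MASK keeps all but the low revision bits; the
 * revision itself (2 here) is then recovered with ~PHY_REVISION_MASK.
 */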

/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	usec_delay(10);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		msec_delay(50);
	} else {
		u16 count = 20;

		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}

/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds so the MAC can finish any retries
		 * it might be performing from previous attempts to
		 * acknowledge PHY read requests.
		 */
		msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.set_page	= e1000_set_page_igp;
	phy->ops.read_reg	= e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg	= e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg	= e1000_read_phy_reg_igp;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg	= e1000_write_phy_reg_igp;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg  = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		DEBUGOUT("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;
	}

	nvm->type = e1000_nvm_flash_sw;

	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

	/* sector_X_addr is a "sector"-aligned address (4096 bytes).
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT);
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
	nvm->ops.release	= e1000_release_nvm_ich8lan;
	nvm->ops.read		= e1000_read_nvm_ich8lan;
	nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write		= e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
		/* fall-through */
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
			e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

	return E1000_SUCCESS;
}

/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_emi_reg_locked");

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
						      data);
	else
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       *data);

	return ret_val;
}

/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: pointer to storage for the value read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	DEBUGFUNC("e1000_read_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
}

/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	DEBUGFUNC("e1000_write_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
}

/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on the setting in the dev_spec structure, the
 *  duplex of the link and the EEE capabilities of the link partner.  The
 *  LPI Control register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until then; otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware
 *  which prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	DEBUGFUNC("e1000_set_eee_pchlan");

	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return E1000_SUCCESS;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * the partner's EEE in 100 ability if
				 * full-duplex is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		if (!link || ((status & E1000_STATUS_SPEED_100) &&
			      (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

static u64 e1000_ltr2ns(u16 ltr)
{
	u32 value, scale;

	/* Determine the latency in nsec based on the LTR value & scale */
	value = ltr & E1000_LTRV_VALUE_MASK;
	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

	return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
}
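
/* Worked example (illustrative): assuming the usual 10-bit value / 3-bit
 * scale split (E1000_LTRV_SCALE_SHIFT == 10, E1000_LTRV_SCALE_FACTOR == 5),
 * an LTR encoding of 0x0803 carries value = 3 and scale = 2, so
 * e1000_ltr2ns() returns 3 * 2^(2 * 5) = 3072 ns.  This matches the
 * 0=2^0ns, 1=2^5ns, ... 5=2^25ns scale encoding described in
 * e1000_platform_pm_pch_lpt() below.
 */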

/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike true PCIe devices, which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  setting the SEND bit to send an Intel On-chip System Fabric sideband
 *  (IOSF-SB) message to the PMC.
 *
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;		/* latency (ns) */
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;

		value = lat_ns;
		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
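		/* Worked example (illustrative): a lat_ns of 50000 does not
		 * fit in the 10-bit value field (assuming E1000_LTRV_VALUE_MASK
		 * is 0x3FF), so the loop above divides by 2^5 rounding up
		 * until it does: 50000 -> 1563 -> 49 with scale = 2.  The
		 * result lat_enc = (2 << 10) | 49 decodes to 49 * 2^10 =
		 * 50176 ns, i.e. no less than the computed latency.
		 */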

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode. This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}

/**
 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 *  @hw: pointer to the HW structure
 *  @itr: interrupt throttling rate
 *
 *  Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
	u32 svcr;
	s32 timer;

	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;

	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;
	}

	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
}

/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, 82579 parts are excessively
	 * aggressive resulting in many collisions.  To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if ((hw->mac.type == e1000_pch2lan) && link) {
		u32 reg;

		reg = E1000_READ_REG(hw, E1000_STATUS);
		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
			reg = E1000_READ_REG(hw, E1000_TIPG);
			reg &= ~E1000_TIPG_IPGT_MASK;
			reg |= 0xFF;
			E1000_WRITE_REG(hw, E1000_TIPG, reg);

			/* Reduce Rx latency in analog PHY */
			ret_val = hw->phy.ops.acquire(hw);
			if (ret_val)
				return ret_val;

			ret_val = e1000_write_emi_reg_locked(hw,
							     I82579_RX_CONFIG,
							     0);

			hw->phy.ops.release(hw);

			if (ret_val)
				return ret_val;
		}
	}

	/* Work-around I218 hang issue */
	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}

	if (hw->mac.type == e1000_pch_lpt) {
		/* Set platform power management values for
		 * Latency Tolerance Reporting (LTR)
		 * Optimized Buffer Flush/Fill (OBFF)
		 */
		ret_val = e1000_platform_pm_pch_lpt(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	if (!link)
		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = FALSE;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-through */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		    E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift; this must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");

	return ret_val;
}

/**
 *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific function pointers for PHY, MAC, and NVM.
 **/
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_init_function_pointers_ich8lan");

	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
		break;
	default:
		break;
	}
}
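
/* Typical usage (illustrative): this entry point is normally reached via
 * e1000_setup_init_funcs() in e1000_api.c, after which the shared code
 * invokes the init_params hooks installed above, roughly as:
 *
 *	e1000_init_function_pointers_ich8lan(hw);
 *	ret_val = hw->mac.ops.init_params(hw);
 *	if (!ret_val)
 *		ret_val = hw->nvm.ops.init_params(hw);
 *	if (!ret_val)
 *		ret_val = hw->phy.ops.init_params(hw);
 */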

/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_acquire_nvm_ich8lan");

	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return E1000_SUCCESS;
}

/**
 *  e1000_release_nvm_ich8lan - Release NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_ich8lan");

	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return;
}

/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_acquire_swflag_ich8lan");

	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	timeout = SW_FLAG_TIMEOUT;

	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	if (ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	return ret_val;
}

/**
 *  e1000_release_swflag_ich8lan - Release software control flag
 *  @hw: pointer to the HW structure
 *
 *  Releases the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	DEBUGFUNC("e1000_release_swflag_ich8lan");

	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);

	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
	} else {
		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
	}

	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	return;
}

/**
 *  e1000_check_mng_mode_ich8lan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has any manageability enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_mng_mode_ich8lan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
		((fwsm & E1000_FWSM_MODE_MASK) ==
		 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
}

/**
 *  e1000_check_mng_mode_pchlan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has iAMT enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_mng_mode_pchlan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 **/
static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
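
	/* Worked example (illustrative): for addr = 00:1B:21:3C:4D:5E the
	 * statements above pack rar_low = 0x3C211B00 and rar_high =
	 * 0x00005E4D, i.e. the first four bytes land in RAL lowest byte
	 * first and the remaining two in the low half of RAH.
	 */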

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32) (hw->mac.rar_entry_count - 6)) {
		s32 ret_val;

		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return;

		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			  (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
}
1594 
1595 /**
1596  *  e1000_rar_set_pch_lpt - Set receive address registers
1597  *  @hw: pointer to the HW structure
1598  *  @addr: pointer to the receive address
1599  *  @index: receive address array register
1600  *
1601  *  Sets the receive address register array at index to the address passed
1602  *  in by addr. For LPT, RAR[0] is the base address register that is to
1603  *  contain the MAC address. SHRA[0-10] are the shared receive address
1604  *  registers that are shared between the Host and manageability engine (ME).
1605  **/
1606 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1607 {
1608 	u32 rar_low, rar_high;
1609 	u32 wlock_mac;
1610 
1611 	DEBUGFUNC("e1000_rar_set_pch_lpt");
1612 
1613 	/* HW expects these in little endian so we reverse the byte order
1614 	 * from network order (big endian) to little endian
1615 	 */
1616 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1617 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1618 
1619 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1620 
1621 	/* If MAC address zero, no need to set the AV bit */
1622 	if (rar_low || rar_high)
1623 		rar_high |= E1000_RAH_AV;
1624 
1625 	if (index == 0) {
1626 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1627 		E1000_WRITE_FLUSH(hw);
1628 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1629 		E1000_WRITE_FLUSH(hw);
1630 		return;
1631 	}
1632 
1633 	/* The manageability engine (ME) can lock certain SHRAR registers that
1634 	 * it is using - those registers are unavailable for use.
1635 	 */
1636 	if (index < hw->mac.rar_entry_count) {
1637 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1638 			    E1000_FWSM_WLOCK_MAC_MASK;
1639 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1640 
1641 		/* Check if all SHRAR registers are locked */
1642 		if (wlock_mac == 1)
1643 			goto out;
1644 
1645 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1646 			s32 ret_val;
1647 
1648 			ret_val = e1000_acquire_swflag_ich8lan(hw);
1649 
1650 			if (ret_val)
1651 				goto out;
1652 
1653 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1654 					rar_low);
1655 			E1000_WRITE_FLUSH(hw);
1656 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1657 					rar_high);
1658 			E1000_WRITE_FLUSH(hw);
1659 
1660 			e1000_release_swflag_ich8lan(hw);
1661 
1662 			/* verify the register updates */
1663 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1664 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1665 				return;
1666 		}
1667 	}
1668 
1669 out:
1670 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1671 }
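
/*
 * Editor's note (illustrative, not driver code): a restatement of the
 * FWSM.WLOCK_MAC policy applied above - 0 means no SHRAR register is
 * locked by the ME, 1 means all of them are, and a value N > 1 leaves
 * SHRAR[0..N-1] writable by the host.  The helper name is hypothetical;
 * index is the RAR index, so SHRAR[index - 1] is the target register.
 */
static inline bool e1000_shrar_writable_example(u32 fwsm, u32 index)
{
	u32 wlock_mac = (fwsm & E1000_FWSM_WLOCK_MAC_MASK) >>
			E1000_FWSM_WLOCK_MAC_SHIFT;

	if (wlock_mac == 1)	/* ME locked every SHRAR register */
		return FALSE;
	return (wlock_mac == 0) || (index <= wlock_mac);
}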
1672 
1673 /**
1674  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1675  *  @hw: pointer to the HW structure
1676  *  @mc_addr_list: array of multicast addresses to program
1677  *  @mc_addr_count: number of multicast addresses to program
1678  *
1679  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1680  *  The caller must have a packed mc_addr_list of multicast addresses.
1681  **/
1682 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1683 					      u8 *mc_addr_list,
1684 					      u32 mc_addr_count)
1685 {
1686 	u16 phy_reg = 0;
1687 	int i;
1688 	s32 ret_val;
1689 
1690 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1691 
1692 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1693 
1694 	ret_val = hw->phy.ops.acquire(hw);
1695 	if (ret_val)
1696 		return;
1697 
1698 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1699 	if (ret_val)
1700 		goto release;
1701 
1702 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1703 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1704 					   (u16)(hw->mac.mta_shadow[i] &
1705 						 0xFFFF));
1706 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1707 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
1708 						 0xFFFF));
1709 	}
1710 
1711 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1712 
1713 release:
1714 	hw->phy.ops.release(hw);
1715 }
1716 
1717 /**
1718  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1719  *  @hw: pointer to the HW structure
1720  *
1721  *  Checks if firmware is blocking the reset of the PHY.
1722  *  This is a function pointer entry point only called by
1723  *  reset routines.
1724  **/
1725 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1726 {
1727 	u32 fwsm;
1728 	bool blocked = FALSE;
1729 	int i = 0;
1730 
1731 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1732 
1733 	do {
1734 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
1735 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1736 			blocked = TRUE;
1737 			msec_delay(10);
1738 			continue;
1739 		}
1740 		blocked = FALSE;
1741 	} while (blocked && (i++ < 10));
1742 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1743 }
1744 
1745 /**
1746  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1747  *  @hw: pointer to the HW structure
1748  *
1749  *  Assumes semaphore already acquired.
1750  *
1751  **/
1752 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1753 {
1754 	u16 phy_data;
1755 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1756 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1757 		E1000_STRAP_SMT_FREQ_SHIFT;
1758 	s32 ret_val;
1759 
1760 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1761 
1762 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1763 	if (ret_val)
1764 		return ret_val;
1765 
1766 	phy_data &= ~HV_SMB_ADDR_MASK;
1767 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1768 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1769 
1770 	if (hw->phy.type == e1000_phy_i217) {
1771 		/* Restore SMBus frequency */
1772 		if (freq--) {
1773 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1774 			phy_data |= (freq & (1 << 0)) <<
1775 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
1776 			phy_data |= (freq & (1 << 1)) <<
1777 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1778 		} else {
1779 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
1780 		}
1781 	}
1782 
1783 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1784 }
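
/*
 * Editor's note (illustrative, not driver code): the "if (freq--)" test
 * above treats a strap frequency field of 0 as unprogrammed; any other
 * value is re-based from 1..3 to 0..2 and its two bits are placed into
 * the two non-adjacent HV_SMB_ADDR frequency bits.  A hypothetical helper
 * making that placement explicit:
 */
static inline u16 e1000_smb_freq_bits_example(u32 strap_freq)
{
	u16 bits = 0;

	if (!strap_freq)	/* frequency not programmed in the strap */
		return 0;
	strap_freq--;		/* re-base 1..3 to 0..2 */
	bits |= (u16)((strap_freq & (1 << 0)) << HV_SMB_ADDR_FREQ_LOW_SHIFT);
	bits |= (u16)((strap_freq & (1 << 1)) <<
		      (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1));
	return bits;
}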
1785 
1786 /**
1787  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1788  *  @hw:   pointer to the HW structure
1789  *
1790  *  SW should configure the LCD from the NVM extended configuration region
1791  *  as a workaround for certain parts.
1792  **/
1793 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1794 {
1795 	struct e1000_phy_info *phy = &hw->phy;
1796 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1797 	s32 ret_val = E1000_SUCCESS;
1798 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1799 
1800 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1801 
1802 	/* Initialize the PHY from the NVM on ICH platforms.  This
1803 	 * is needed due to an issue where the NVM configuration is
1804 	 * not properly autoloaded after power transitions.
1805 	 * Therefore, after each PHY reset, we will load the
1806 	 * configuration data out of the NVM manually.
1807 	 */
1808 	switch (hw->mac.type) {
1809 	case e1000_ich8lan:
1810 		if (phy->type != e1000_phy_igp_3)
1811 			return ret_val;
1812 
1813 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1814 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1815 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1816 			break;
1817 		}
1818 		/* Fall-thru */
1819 	case e1000_pchlan:
1820 	case e1000_pch2lan:
1821 	case e1000_pch_lpt:
1822 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1823 		break;
1824 	default:
1825 		return ret_val;
1826 	}
1827 
1828 	ret_val = hw->phy.ops.acquire(hw);
1829 	if (ret_val)
1830 		return ret_val;
1831 
1832 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1833 	if (!(data & sw_cfg_mask))
1834 		goto release;
1835 
1836 	/* Make sure HW does not configure LCD from PHY
1837 	 * extended configuration before SW configuration
1838 	 */
1839 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1840 	if ((hw->mac.type < e1000_pch2lan) &&
1841 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1842 			goto release;
1843 
1844 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1845 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1846 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1847 	if (!cnf_size)
1848 		goto release;
1849 
1850 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1851 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1852 
1853 	if (((hw->mac.type == e1000_pchlan) &&
1854 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1855 	    (hw->mac.type > e1000_pchlan)) {
1856 		/* HW configures the SMBus address and LEDs when the
1857 		 * OEM and LCD Write Enable bits are set in the NVM.
1858 		 * When both NVM bits are cleared, SW will configure
1859 		 * them instead.
1860 		 */
1861 		ret_val = e1000_write_smbus_addr(hw);
1862 		if (ret_val)
1863 			goto release;
1864 
1865 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1866 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1867 							(u16)data);
1868 		if (ret_val)
1869 			goto release;
1870 	}
1871 
1872 	/* Configure LCD from extended configuration region. */
1873 
1874 	/* cnf_base_addr is in DWORD */
1875 	word_addr = (u16)(cnf_base_addr << 1);
1876 
1877 	for (i = 0; i < cnf_size; i++) {
1878 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1879 					   &reg_data);
1880 		if (ret_val)
1881 			goto release;
1882 
1883 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1884 					   1, &reg_addr);
1885 		if (ret_val)
1886 			goto release;
1887 
1888 		/* Save off the PHY page for future writes. */
1889 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1890 			phy_page = reg_data;
1891 			continue;
1892 		}
1893 
1894 		reg_addr &= PHY_REG_MASK;
1895 		reg_addr |= phy_page;
1896 
1897 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1898 						    reg_data);
1899 		if (ret_val)
1900 			goto release;
1901 	}
1902 
1903 release:
1904 	hw->phy.ops.release(hw);
1905 	return ret_val;
1906 }
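
/*
 * Editor's note (illustrative, not driver code): the extended
 * configuration region walked above is a sequence of (data word,
 * address word) pairs.  An address word equal to
 * IGP01E1000_PHY_PAGE_SELECT updates the page latch instead of being
 * written directly; every other address is masked and combined with the
 * latched page.  cfg[] is a hypothetical in-memory copy of the region.
 */
static void e1000_walk_ext_cfg_example(const u16 *cfg, u32 cnf_size)
{
	u16 phy_page = 0;
	u32 i;

	for (i = 0; i < cnf_size; i++) {
		u16 reg_data = cfg[i * 2];
		u16 reg_addr = cfg[i * 2 + 1];

		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;	/* latch page for later writes */
			continue;
		}
		reg_addr = (reg_addr & PHY_REG_MASK) | phy_page;
		/* ...write reg_data to PHY register reg_addr... */
	}
}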
1907 
1908 /**
1909  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1910  *  @hw:   pointer to the HW structure
1911  *  @link: link up bool flag
1912  *
1913  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1914  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1915  *  If link is down, the function will restore the default K1 setting located
1916  *  in the NVM.
1917  **/
1918 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1919 {
1920 	s32 ret_val = E1000_SUCCESS;
1921 	u16 status_reg = 0;
1922 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1923 
1924 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1925 
1926 	if (hw->mac.type != e1000_pchlan)
1927 		return E1000_SUCCESS;
1928 
1929 	/* Wrap the whole flow with the sw flag */
1930 	ret_val = hw->phy.ops.acquire(hw);
1931 	if (ret_val)
1932 		return ret_val;
1933 
1934 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1935 	if (link) {
1936 		if (hw->phy.type == e1000_phy_82578) {
1937 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1938 							      &status_reg);
1939 			if (ret_val)
1940 				goto release;
1941 
1942 			status_reg &= (BM_CS_STATUS_LINK_UP |
1943 				       BM_CS_STATUS_RESOLVED |
1944 				       BM_CS_STATUS_SPEED_MASK);
1945 
1946 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1947 					   BM_CS_STATUS_RESOLVED |
1948 					   BM_CS_STATUS_SPEED_1000))
1949 				k1_enable = FALSE;
1950 		}
1951 
1952 		if (hw->phy.type == e1000_phy_82577) {
1953 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1954 							      &status_reg);
1955 			if (ret_val)
1956 				goto release;
1957 
1958 			status_reg &= (HV_M_STATUS_LINK_UP |
1959 				       HV_M_STATUS_AUTONEG_COMPLETE |
1960 				       HV_M_STATUS_SPEED_MASK);
1961 
1962 			if (status_reg == (HV_M_STATUS_LINK_UP |
1963 					   HV_M_STATUS_AUTONEG_COMPLETE |
1964 					   HV_M_STATUS_SPEED_1000))
1965 				k1_enable = FALSE;
1966 		}
1967 
1968 		/* Link stall fix for link up */
1969 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1970 						       0x0100);
1971 		if (ret_val)
1972 			goto release;
1973 
1974 	} else {
1975 		/* Link stall fix for link down */
1976 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1977 						       0x4100);
1978 		if (ret_val)
1979 			goto release;
1980 	}
1981 
1982 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1983 
1984 release:
1985 	hw->phy.ops.release(hw);
1986 
1987 	return ret_val;
1988 }
1989 
1990 /**
1991  *  e1000_configure_k1_ich8lan - Configure K1 power state
1992  *  @hw: pointer to the HW structure
1993  *  @k1_enable: K1 state to configure
1994  *
1995  *  Configure the K1 power state based on the provided parameter.
1996  *  Assumes semaphore already acquired.
1997  *
1998  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1999  **/
2000 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2001 {
2002 	s32 ret_val;
2003 	u32 ctrl_reg = 0;
2004 	u32 ctrl_ext = 0;
2005 	u32 reg = 0;
2006 	u16 kmrn_reg = 0;
2007 
2008 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2009 
2010 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2011 					     &kmrn_reg);
2012 	if (ret_val)
2013 		return ret_val;
2014 
2015 	if (k1_enable)
2016 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2017 	else
2018 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2019 
2020 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2021 					      kmrn_reg);
2022 	if (ret_val)
2023 		return ret_val;
2024 
2025 	usec_delay(20);
2026 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2027 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2028 
2029 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2030 	reg |= E1000_CTRL_FRCSPD;
2031 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2032 
2033 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2034 	E1000_WRITE_FLUSH(hw);
2035 	usec_delay(20);
2036 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2037 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2038 	E1000_WRITE_FLUSH(hw);
2039 	usec_delay(20);
2040 
2041 	return E1000_SUCCESS;
2042 }
2043 
2044 /**
2045  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2046  *  @hw:       pointer to the HW structure
2047  *  @d0_state: boolean if entering d0 or d3 device state
2048  *
2049  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2050  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2051  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2052  **/
2053 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2054 {
2055 	s32 ret_val = 0;
2056 	u32 mac_reg;
2057 	u16 oem_reg;
2058 
2059 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2060 
2061 	if (hw->mac.type < e1000_pchlan)
2062 		return ret_val;
2063 
2064 	ret_val = hw->phy.ops.acquire(hw);
2065 	if (ret_val)
2066 		return ret_val;
2067 
2068 	if (hw->mac.type == e1000_pchlan) {
2069 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2070 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2071 			goto release;
2072 	}
2073 
2074 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2075 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2076 		goto release;
2077 
2078 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2079 
2080 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2081 	if (ret_val)
2082 		goto release;
2083 
2084 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2085 
2086 	if (d0_state) {
2087 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2088 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2089 
2090 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2091 			oem_reg |= HV_OEM_BITS_LPLU;
2092 	} else {
2093 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2094 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2095 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2096 
2097 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2098 		    E1000_PHY_CTRL_NOND0A_LPLU))
2099 			oem_reg |= HV_OEM_BITS_LPLU;
2100 	}
2101 
2102 	/* Set Restart auto-neg to activate the bits */
2103 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2104 	    !hw->phy.ops.check_reset_block(hw))
2105 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2106 
2107 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2108 
2109 release:
2110 	hw->phy.ops.release(hw);
2111 
2112 	return ret_val;
2113 }
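
/*
 * Editor's note (illustrative, not driver code): the D0/D3 split above
 * reduces to which PHY_CTRL source bits feed each OEM bit - D3
 * additionally honours the non-D0a variants.  A hypothetical restatement:
 */
static u16 e1000_oem_bits_from_phy_ctrl_example(u32 mac_reg, bool d0_state)
{
	u32 gbe_mask = E1000_PHY_CTRL_GBE_DISABLE;
	u32 lplu_mask = E1000_PHY_CTRL_D0A_LPLU;
	u16 oem = 0;

	if (!d0_state) {
		gbe_mask |= E1000_PHY_CTRL_NOND0A_GBE_DISABLE;
		lplu_mask |= E1000_PHY_CTRL_NOND0A_LPLU;
	}
	if (mac_reg & gbe_mask)
		oem |= HV_OEM_BITS_GBE_DIS;
	if (mac_reg & lplu_mask)
		oem |= HV_OEM_BITS_LPLU;
	return oem;
}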
2114 
2115 
2116 /**
2117  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2118  *  @hw:   pointer to the HW structure
2119  **/
2120 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2121 {
2122 	s32 ret_val;
2123 	u16 data;
2124 
2125 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2126 
2127 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2128 	if (ret_val)
2129 		return ret_val;
2130 
2131 	data |= HV_KMRN_MDIO_SLOW;
2132 
2133 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2134 
2135 	return ret_val;
2136 }
2137 
2138 /**
2139  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2140  *  done after every PHY reset.
2141  **/
2142 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2143 {
2144 	s32 ret_val = E1000_SUCCESS;
2145 	u16 phy_data;
2146 
2147 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2148 
2149 	if (hw->mac.type != e1000_pchlan)
2150 		return E1000_SUCCESS;
2151 
2152 	/* Set MDIO slow mode before any other MDIO access */
2153 	if (hw->phy.type == e1000_phy_82577) {
2154 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2155 		if (ret_val)
2156 			return ret_val;
2157 	}
2158 
2159 	if (((hw->phy.type == e1000_phy_82577) &&
2160 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2161 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2162 		/* Disable generation of early preamble */
2163 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2164 		if (ret_val)
2165 			return ret_val;
2166 
2167 		/* Preamble tuning for SSC */
2168 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2169 						0xA204);
2170 		if (ret_val)
2171 			return ret_val;
2172 	}
2173 
2174 	if (hw->phy.type == e1000_phy_82578) {
2175 		/* Return registers to default by doing a soft reset then
2176 		 * writing 0x3140 to the control register.
2177 		 */
2178 		if (hw->phy.revision < 2) {
2179 			e1000_phy_sw_reset_generic(hw);
2180 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2181 							0x3140);
2182 		}
2183 	}
2184 
2185 	/* Select page 0 */
2186 	ret_val = hw->phy.ops.acquire(hw);
2187 	if (ret_val)
2188 		return ret_val;
2189 
2190 	hw->phy.addr = 1;
2191 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2192 	hw->phy.ops.release(hw);
2193 	if (ret_val)
2194 		return ret_val;
2195 
2196 	/* Configure the K1 Si workaround during PHY reset assuming there is
2197 	 * link so that it disables K1 if link is at 1Gbps.
2198 	 */
2199 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2200 	if (ret_val)
2201 		return ret_val;
2202 
2203 	/* Workaround for link disconnects on a busy hub in half duplex */
2204 	ret_val = hw->phy.ops.acquire(hw);
2205 	if (ret_val)
2206 		return ret_val;
2207 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2208 	if (ret_val)
2209 		goto release;
2210 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2211 					       phy_data & 0x00FF);
2212 	if (ret_val)
2213 		goto release;
2214 
2215 	/* set MSE higher to enable link to stay up when noise is high */
2216 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2217 release:
2218 	hw->phy.ops.release(hw);
2219 
2220 	return ret_val;
2221 }
2222 
2223 /**
2224  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2225  *  @hw:   pointer to the HW structure
2226  **/
2227 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2228 {
2229 	u32 mac_reg;
2230 	u16 i, phy_reg = 0;
2231 	s32 ret_val;
2232 
2233 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2234 
2235 	ret_val = hw->phy.ops.acquire(hw);
2236 	if (ret_val)
2237 		return;
2238 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2239 	if (ret_val)
2240 		goto release;
2241 
2242 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2243 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2244 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2245 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2246 					   (u16)(mac_reg & 0xFFFF));
2247 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2248 					   (u16)((mac_reg >> 16) & 0xFFFF));
2249 
2250 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2251 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2252 					   (u16)(mac_reg & 0xFFFF));
2253 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2254 					   (u16)((mac_reg & E1000_RAH_AV)
2255 						 >> 16));
2256 	}
2257 
2258 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2259 
2260 release:
2261 	hw->phy.ops.release(hw);
2262 }
2263 
2264 static u32 e1000_calc_rx_da_crc(u8 mac[])
2265 {
2266 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2267 	u32 i, j, mask, crc;
2268 
2269 	DEBUGFUNC("e1000_calc_rx_da_crc");
2270 
2271 	crc = 0xffffffff;
2272 	for (i = 0; i < 6; i++) {
2273 		crc = crc ^ mac[i];
2274 		for (j = 8; j > 0; j--) {
2275 			mask = (crc & 1) * (-1);
2276 			crc = (crc >> 1) ^ (poly & mask);
2277 		}
2278 	}
2279 	return ~crc;
2280 }
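
/*
 * Editor's note (illustrative, not driver code): the routine above is the
 * standard bit-reflected CRC-32 (polynomial 0xEDB88320, initial value
 * 0xFFFFFFFF, final inversion) applied to the six destination-address
 * octets, so any reference CRC-32 implementation should produce the same
 * value.  A hypothetical wrapper showing the usage pattern from
 * e1000_lv_jumbo_workaround_ich8lan() below:
 */
static void e1000_program_raicc_example(struct e1000_hw *hw, u8 *mac, u32 i)
{
	/* seed the MAC's CRC accumulator for receive address i */
	E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), e1000_calc_rx_da_crc(mac));
}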
2281 
2282 /**
2283  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2284  *  with 82579 PHY
2285  *  @hw: pointer to the HW structure
2286  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2287  **/
2288 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2289 {
2290 	s32 ret_val = E1000_SUCCESS;
2291 	u16 phy_reg, data;
2292 	u32 mac_reg;
2293 	u16 i;
2294 
2295 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2296 
2297 	if (hw->mac.type < e1000_pch2lan)
2298 		return E1000_SUCCESS;
2299 
2300 	/* disable Rx path while enabling/disabling workaround */
2301 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2302 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2303 					phy_reg | (1 << 14));
2304 	if (ret_val)
2305 		return ret_val;
2306 
2307 	if (enable) {
2308 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2309 		 * SHRAL/H) and initial CRC values to the MAC
2310 		 */
2311 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2312 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2313 			u32 addr_high, addr_low;
2314 
2315 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2316 			if (!(addr_high & E1000_RAH_AV))
2317 				continue;
2318 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2319 			mac_addr[0] = (addr_low & 0xFF);
2320 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2321 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2322 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2323 			mac_addr[4] = (addr_high & 0xFF);
2324 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2325 
2326 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2327 					e1000_calc_rx_da_crc(mac_addr));
2328 		}
2329 
2330 		/* Write Rx addresses to the PHY */
2331 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2332 
2333 		/* Enable jumbo frame workaround in the MAC */
2334 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2335 		mac_reg &= ~(1 << 14);
2336 		mac_reg |= (7 << 15);
2337 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2338 
2339 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2340 		mac_reg |= E1000_RCTL_SECRC;
2341 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2342 
2343 		ret_val = e1000_read_kmrn_reg_generic(hw,
2344 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2345 						&data);
2346 		if (ret_val)
2347 			return ret_val;
2348 		ret_val = e1000_write_kmrn_reg_generic(hw,
2349 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2350 						data | (1 << 0));
2351 		if (ret_val)
2352 			return ret_val;
2353 		ret_val = e1000_read_kmrn_reg_generic(hw,
2354 						E1000_KMRNCTRLSTA_HD_CTRL,
2355 						&data);
2356 		if (ret_val)
2357 			return ret_val;
2358 		data &= ~(0xF << 8);
2359 		data |= (0xB << 8);
2360 		ret_val = e1000_write_kmrn_reg_generic(hw,
2361 						E1000_KMRNCTRLSTA_HD_CTRL,
2362 						data);
2363 		if (ret_val)
2364 			return ret_val;
2365 
2366 		/* Enable jumbo frame workaround in the PHY */
2367 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2368 		data &= ~(0x7F << 5);
2369 		data |= (0x37 << 5);
2370 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2371 		if (ret_val)
2372 			return ret_val;
2373 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2374 		data &= ~(1 << 13);
2375 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2376 		if (ret_val)
2377 			return ret_val;
2378 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2379 		data &= ~(0x3FF << 2);
2380 		data |= (0x1A << 2);
2381 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2382 		if (ret_val)
2383 			return ret_val;
2384 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2385 		if (ret_val)
2386 			return ret_val;
2387 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2388 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2389 						(1 << 10));
2390 		if (ret_val)
2391 			return ret_val;
2392 	} else {
2393 		/* Write MAC register values back to h/w defaults */
2394 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2395 		mac_reg &= ~(0xF << 14);
2396 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2397 
2398 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2399 		mac_reg &= ~E1000_RCTL_SECRC;
2400 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2401 
2402 		ret_val = e1000_read_kmrn_reg_generic(hw,
2403 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2404 						&data);
2405 		if (ret_val)
2406 			return ret_val;
2407 		ret_val = e1000_write_kmrn_reg_generic(hw,
2408 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2409 						data & ~(1 << 0));
2410 		if (ret_val)
2411 			return ret_val;
2412 		ret_val = e1000_read_kmrn_reg_generic(hw,
2413 						E1000_KMRNCTRLSTA_HD_CTRL,
2414 						&data);
2415 		if (ret_val)
2416 			return ret_val;
2417 		data &= ~(0xF << 8);
2418 		data |= (0xB << 8);
2419 		ret_val = e1000_write_kmrn_reg_generic(hw,
2420 						E1000_KMRNCTRLSTA_HD_CTRL,
2421 						data);
2422 		if (ret_val)
2423 			return ret_val;
2424 
2425 		/* Write PHY register values back to h/w defaults */
2426 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2427 		data &= ~(0x7F << 5);
2428 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2429 		if (ret_val)
2430 			return ret_val;
2431 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2432 		data |= (1 << 13);
2433 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2434 		if (ret_val)
2435 			return ret_val;
2436 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2437 		data &= ~(0x3FF << 2);
2438 		data |= (0x8 << 2);
2439 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2440 		if (ret_val)
2441 			return ret_val;
2442 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2443 		if (ret_val)
2444 			return ret_val;
2445 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2446 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2447 						~(1 << 10));
2448 		if (ret_val)
2449 			return ret_val;
2450 	}
2451 
2452 	/* re-enable Rx path after enabling/disabling workaround */
2453 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2454 				     ~(1 << 14));
2455 }
2456 
2457 /**
2458  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2459  *  done after every PHY reset.
2460  **/
2461 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2462 {
2463 	s32 ret_val = E1000_SUCCESS;
2464 
2465 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2466 
2467 	if (hw->mac.type != e1000_pch2lan)
2468 		return E1000_SUCCESS;
2469 
2470 	/* Set MDIO slow mode before any other MDIO access */
2471 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2472 	if (ret_val)
2473 		return ret_val;
2474 
2475 	ret_val = hw->phy.ops.acquire(hw);
2476 	if (ret_val)
2477 		return ret_val;
2478 	/* set MSE higher to enable link to stay up when noise is high */
2479 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2480 	if (ret_val)
2481 		goto release;
2482 	/* drop link after 5 times MSE threshold was reached */
2483 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2484 release:
2485 	hw->phy.ops.release(hw);
2486 
2487 	return ret_val;
2488 }
2489 
2490 /**
2491  *  e1000_k1_workaround_lv - K1 Si workaround
2492  *  @hw:   pointer to the HW structure
2493  *
2494  *  Workaround to set the K1 beacon duration for 82579 parts
2495  **/
2496 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2497 {
2498 	s32 ret_val = E1000_SUCCESS;
2499 	u16 status_reg = 0;
2500 	u32 mac_reg;
2501 	u16 phy_reg;
2502 
2503 	DEBUGFUNC("e1000_k1_workaround_lv");
2504 
2505 	if (hw->mac.type != e1000_pch2lan)
2506 		return E1000_SUCCESS;
2507 
2508 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
2509 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2510 	if (ret_val)
2511 		return ret_val;
2512 
2513 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2514 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2515 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2516 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2517 
2518 		ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
2519 		if (ret_val)
2520 			return ret_val;
2521 
2522 		if (status_reg & HV_M_STATUS_SPEED_1000) {
2523 			u16 pm_phy_reg;
2524 
2525 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2526 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2527 			/* LV 1Gbps packet drop issue workaround */
2528 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2529 						       &pm_phy_reg);
2530 			if (ret_val)
2531 				return ret_val;
2532 			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2533 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2534 							pm_phy_reg);
2535 			if (ret_val)
2536 				return ret_val;
2537 		} else {
2538 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2539 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2540 		}
2541 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2542 		ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2543 	}
2544 
2545 	return ret_val;
2546 }
2547 
2548 /**
2549  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2550  *  @hw:   pointer to the HW structure
2551  *  @gate: boolean set to TRUE to gate, FALSE to ungate
2552  *
2553  *  Gate/ungate the automatic PHY configuration via hardware; perform
2554  *  the configuration via software instead.
2555  **/
2556 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2557 {
2558 	u32 extcnf_ctrl;
2559 
2560 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2561 
2562 	if (hw->mac.type < e1000_pch2lan)
2563 		return;
2564 
2565 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2566 
2567 	if (gate)
2568 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2569 	else
2570 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2571 
2572 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2573 }
2574 
2575 /**
2576  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2577  *  @hw: pointer to the HW structure
2578  *
2579  *  Check the appropriate indication the MAC has finished configuring the
2580  *  PHY after a software reset.
2581  **/
2582 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2583 {
2584 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2585 
2586 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
2587 
2588 	/* Wait for basic configuration completes before proceeding */
2589 	do {
2590 		data = E1000_READ_REG(hw, E1000_STATUS);
2591 		data &= E1000_STATUS_LAN_INIT_DONE;
2592 		usec_delay(100);
2593 	} while ((!data) && --loop);
2594 
2595 	/* If basic configuration is incomplete before the above loop
2596 	 * count reaches 0, loading the configuration from NVM will
2597 	 * leave the PHY in a bad state possibly resulting in no link.
2598 	 */
2599 	if (loop == 0)
2600 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2601 
2602 	/* Clear the Init Done bit for the next init event */
2603 	data = E1000_READ_REG(hw, E1000_STATUS);
2604 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2605 	E1000_WRITE_REG(hw, E1000_STATUS, data);
2606 }
2607 
2608 /**
2609  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2610  *  @hw: pointer to the HW structure
2611  **/
2612 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2613 {
2614 	s32 ret_val = E1000_SUCCESS;
2615 	u16 reg;
2616 
2617 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2618 
2619 	if (hw->phy.ops.check_reset_block(hw))
2620 		return E1000_SUCCESS;
2621 
2622 	/* Allow time for h/w to get to quiescent state after reset */
2623 	msec_delay(10);
2624 
2625 	/* Perform any necessary post-reset workarounds */
2626 	switch (hw->mac.type) {
2627 	case e1000_pchlan:
2628 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2629 		if (ret_val)
2630 			return ret_val;
2631 		break;
2632 	case e1000_pch2lan:
2633 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2634 		if (ret_val)
2635 			return ret_val;
2636 		break;
2637 	default:
2638 		break;
2639 	}
2640 
2641 	/* Clear the host wakeup bit after lcd reset */
2642 	if (hw->mac.type >= e1000_pchlan) {
2643 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2644 		reg &= ~BM_WUC_HOST_WU_BIT;
2645 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2646 	}
2647 
2648 	/* Configure the LCD with the extended configuration region in NVM */
2649 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2650 	if (ret_val)
2651 		return ret_val;
2652 
2653 	/* Configure the LCD with the OEM bits in NVM */
2654 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
2655 
2656 	if (hw->mac.type == e1000_pch2lan) {
2657 		/* Ungate automatic PHY configuration on non-managed 82579 */
2658 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
2659 		    E1000_ICH_FWSM_FW_VALID)) {
2660 			msec_delay(10);
2661 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
2662 		}
2663 
2664 		/* Set EEE LPI Update Timer to 200usec */
2665 		ret_val = hw->phy.ops.acquire(hw);
2666 		if (ret_val)
2667 			return ret_val;
2668 		ret_val = e1000_write_emi_reg_locked(hw,
2669 						     I82579_LPI_UPDATE_TIMER,
2670 						     0x1387);
2671 		hw->phy.ops.release(hw);
2672 	}
2673 
2674 	return ret_val;
2675 }
2676 
2677 /**
2678  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2679  *  @hw: pointer to the HW structure
2680  *
2681  *  Resets the PHY
2682  *  This is a function pointer entry point called by drivers
2683  *  or other shared routines.
2684  **/
2685 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2686 {
2687 	s32 ret_val = E1000_SUCCESS;
2688 
2689 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2690 
2691 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2692 	if ((hw->mac.type == e1000_pch2lan) &&
2693 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2694 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
2695 
2696 	ret_val = e1000_phy_hw_reset_generic(hw);
2697 	if (ret_val)
2698 		return ret_val;
2699 
2700 	return e1000_post_phy_reset_ich8lan(hw);
2701 }
2702 
2703 /**
2704  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2705  *  @hw: pointer to the HW structure
2706  *  @active: TRUE to enable LPLU, FALSE to disable
2707  *
2708  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2709  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2710  *  the phy speed. This function will manually set the LPLU bit and restart
2711  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2712  *  since it configures the same bit.
2713  **/
2714 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2715 {
2716 	s32 ret_val;
2717 	u16 oem_reg;
2718 
2719 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
2720 
2721 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2722 	if (ret_val)
2723 		return ret_val;
2724 
2725 	if (active)
2726 		oem_reg |= HV_OEM_BITS_LPLU;
2727 	else
2728 		oem_reg &= ~HV_OEM_BITS_LPLU;
2729 
2730 	if (!hw->phy.ops.check_reset_block(hw))
2731 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2732 
2733 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2734 }
2735 
2736 /**
2737  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2738  *  @hw: pointer to the HW structure
2739  *  @active: TRUE to enable LPLU, FALSE to disable
2740  *
2741  *  Sets the LPLU D0 state according to the active flag.  When
2742  *  activating LPLU this function also disables smart speed
2743  *  and vice versa.  LPLU will not be activated unless the
2744  *  device autonegotiation advertisement meets standards of
2745  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2746  *  This is a function pointer entry point only called by
2747  *  PHY setup routines.
2748  **/
2749 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2750 {
2751 	struct e1000_phy_info *phy = &hw->phy;
2752 	u32 phy_ctrl;
2753 	s32 ret_val = E1000_SUCCESS;
2754 	u16 data;
2755 
2756 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2757 
2758 	if (phy->type == e1000_phy_ife)
2759 		return E1000_SUCCESS;
2760 
2761 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2762 
2763 	if (active) {
2764 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2765 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2766 
2767 		if (phy->type != e1000_phy_igp_3)
2768 			return E1000_SUCCESS;
2769 
2770 		/* Call gig speed drop workaround on LPLU before accessing
2771 		 * any PHY registers
2772 		 */
2773 		if (hw->mac.type == e1000_ich8lan)
2774 			e1000_gig_downshift_workaround_ich8lan(hw);
2775 
2776 		/* When LPLU is enabled, we should disable SmartSpeed */
2777 		ret_val = phy->ops.read_reg(hw,
2778 					    IGP01E1000_PHY_PORT_CONFIG,
2779 					    &data);
2780 		if (ret_val)
2781 			return ret_val;
2782 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2783 		ret_val = phy->ops.write_reg(hw,
2784 					     IGP01E1000_PHY_PORT_CONFIG,
2785 					     data);
2786 		if (ret_val)
2787 			return ret_val;
2788 	} else {
2789 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2790 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2791 
2792 		if (phy->type != e1000_phy_igp_3)
2793 			return E1000_SUCCESS;
2794 
2795 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2796 		 * during Dx states where the power conservation is most
2797 		 * important.  During driver activity we should enable
2798 		 * SmartSpeed, so performance is maintained.
2799 		 */
2800 		if (phy->smart_speed == e1000_smart_speed_on) {
2801 			ret_val = phy->ops.read_reg(hw,
2802 						    IGP01E1000_PHY_PORT_CONFIG,
2803 						    &data);
2804 			if (ret_val)
2805 				return ret_val;
2806 
2807 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2808 			ret_val = phy->ops.write_reg(hw,
2809 						     IGP01E1000_PHY_PORT_CONFIG,
2810 						     data);
2811 			if (ret_val)
2812 				return ret_val;
2813 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2814 			ret_val = phy->ops.read_reg(hw,
2815 						    IGP01E1000_PHY_PORT_CONFIG,
2816 						    &data);
2817 			if (ret_val)
2818 				return ret_val;
2819 
2820 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2821 			ret_val = phy->ops.write_reg(hw,
2822 						     IGP01E1000_PHY_PORT_CONFIG,
2823 						     data);
2824 			if (ret_val)
2825 				return ret_val;
2826 		}
2827 	}
2828 
2829 	return E1000_SUCCESS;
2830 }
2831 
2832 /**
2833  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2834  *  @hw: pointer to the HW structure
2835  *  @active: TRUE to enable LPLU, FALSE to disable
2836  *
2837  *  Sets the LPLU D3 state according to the active flag.  When
2838  *  activating LPLU this function also disables smart speed
2839  *  and vice versa.  LPLU will not be activated unless the
2840  *  device autonegotiation advertisement meets standards of
2841  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2842  *  This is a function pointer entry point only called by
2843  *  PHY setup routines.
2844  **/
2845 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2846 {
2847 	struct e1000_phy_info *phy = &hw->phy;
2848 	u32 phy_ctrl;
2849 	s32 ret_val = E1000_SUCCESS;
2850 	u16 data;
2851 
2852 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2853 
2854 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2855 
2856 	if (!active) {
2857 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2858 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2859 
2860 		if (phy->type != e1000_phy_igp_3)
2861 			return E1000_SUCCESS;
2862 
2863 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2864 		 * during Dx states where the power conservation is most
2865 		 * important.  During driver activity we should enable
2866 		 * SmartSpeed, so performance is maintained.
2867 		 */
2868 		if (phy->smart_speed == e1000_smart_speed_on) {
2869 			ret_val = phy->ops.read_reg(hw,
2870 						    IGP01E1000_PHY_PORT_CONFIG,
2871 						    &data);
2872 			if (ret_val)
2873 				return ret_val;
2874 
2875 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2876 			ret_val = phy->ops.write_reg(hw,
2877 						     IGP01E1000_PHY_PORT_CONFIG,
2878 						     data);
2879 			if (ret_val)
2880 				return ret_val;
2881 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2882 			ret_val = phy->ops.read_reg(hw,
2883 						    IGP01E1000_PHY_PORT_CONFIG,
2884 						    &data);
2885 			if (ret_val)
2886 				return ret_val;
2887 
2888 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2889 			ret_val = phy->ops.write_reg(hw,
2890 						     IGP01E1000_PHY_PORT_CONFIG,
2891 						     data);
2892 			if (ret_val)
2893 				return ret_val;
2894 		}
2895 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2896 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2897 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2898 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2899 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2900 
2901 		if (phy->type != e1000_phy_igp_3)
2902 			return E1000_SUCCESS;
2903 
2904 		/* Call gig speed drop workaround on LPLU before accessing
2905 		 * any PHY registers
2906 		 */
2907 		if (hw->mac.type == e1000_ich8lan)
2908 			e1000_gig_downshift_workaround_ich8lan(hw);
2909 
2910 		/* When LPLU is enabled, we should disable SmartSpeed */
2911 		ret_val = phy->ops.read_reg(hw,
2912 					    IGP01E1000_PHY_PORT_CONFIG,
2913 					    &data);
2914 		if (ret_val)
2915 			return ret_val;
2916 
2917 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2918 		ret_val = phy->ops.write_reg(hw,
2919 					     IGP01E1000_PHY_PORT_CONFIG,
2920 					     data);
2921 	}
2922 
2923 	return ret_val;
2924 }
2925 
2926 /**
2927  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2928  *  @hw: pointer to the HW structure
2929  *  @bank:  pointer to the variable that returns the active bank
2930  *
2931  *  Reads signature byte from the NVM using the flash access registers.
2932  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2933  **/
2934 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2935 {
2936 	u32 eecd;
2937 	struct e1000_nvm_info *nvm = &hw->nvm;
2938 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2939 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2940 	u8 sig_byte = 0;
2941 	s32 ret_val;
2942 
2943 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2944 
2945 	switch (hw->mac.type) {
2946 	case e1000_ich8lan:
2947 	case e1000_ich9lan:
2948 		eecd = E1000_READ_REG(hw, E1000_EECD);
2949 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2950 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2951 			if (eecd & E1000_EECD_SEC1VAL)
2952 				*bank = 1;
2953 			else
2954 				*bank = 0;
2955 
2956 			return E1000_SUCCESS;
2957 		}
2958 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2959 		/* fall-thru */
2960 	default:
2961 		/* set bank to 0 in case flash read fails */
2962 		*bank = 0;
2963 
2964 		/* Check bank 0 */
2965 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2966 							&sig_byte);
2967 		if (ret_val)
2968 			return ret_val;
2969 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2970 		    E1000_ICH_NVM_SIG_VALUE) {
2971 			*bank = 0;
2972 			return E1000_SUCCESS;
2973 		}
2974 
2975 		/* Check bank 1 */
2976 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2977 							bank1_offset,
2978 							&sig_byte);
2979 		if (ret_val)
2980 			return ret_val;
2981 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2982 		    E1000_ICH_NVM_SIG_VALUE) {
2983 			*bank = 1;
2984 			return E1000_SUCCESS;
2985 		}
2986 
2987 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2988 		return -E1000_ERR_NVM;
2989 	}
2990 }
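
/*
 * Editor's note (illustrative, not driver code): the byte-offset
 * arithmetic above - E1000_ICH_NVM_SIG_WORD * 2 + 1 - selects the high
 * byte of NVM word 0x13, where the bank signature (bits 15:14 of the
 * word, bits 7:6 of the byte) lives.  A hypothetical predicate for the
 * signature test:
 */
static inline bool e1000_bank_sig_valid_example(u8 sig_byte)
{
	/* a valid bank carries signature bits equal to 10b */
	return (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
	       E1000_ICH_NVM_SIG_VALUE;
}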
2991 
2992 /**
2993  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2994  *  @hw: pointer to the HW structure
2995  *  @offset: The word offset of the word(s) to read.
2996  *  @words: Size of data to read in words
2997  *  @data: Pointer to the word(s) to read at offset.
2998  *
2999  *  Reads a word(s) from the NVM using the flash access registers.
3000  **/
3001 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3002 				  u16 *data)
3003 {
3004 	struct e1000_nvm_info *nvm = &hw->nvm;
3005 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3006 	u32 act_offset;
3007 	s32 ret_val = E1000_SUCCESS;
3008 	u32 bank = 0;
3009 	u16 i, word;
3010 
3011 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3012 
3013 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3014 	    (words == 0)) {
3015 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3016 		ret_val = -E1000_ERR_NVM;
3017 		goto out;
3018 	}
3019 
3020 	nvm->ops.acquire(hw);
3021 
3022 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3023 	if (ret_val != E1000_SUCCESS) {
3024 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3025 		bank = 0;
3026 	}
3027 
3028 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3029 	act_offset += offset;
3030 
3031 	ret_val = E1000_SUCCESS;
3032 	for (i = 0; i < words; i++) {
3033 		if (dev_spec->shadow_ram[offset+i].modified) {
3034 			data[i] = dev_spec->shadow_ram[offset+i].value;
3035 		} else {
3036 			ret_val = e1000_read_flash_word_ich8lan(hw,
3037 								act_offset + i,
3038 								&word);
3039 			if (ret_val)
3040 				break;
3041 			data[i] = word;
3042 		}
3043 	}
3044 
3045 	nvm->ops.release(hw);
3046 
3047 out:
3048 	if (ret_val)
3049 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3050 
3051 	return ret_val;
3052 }
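
/*
 * Editor's note (illustrative, not driver code): a minimal model of the
 * shadow-RAM overlay implemented above - a word marked modified by
 * e1000_write_nvm_ich8lan() is served from the shadow copy until
 * e1000_update_nvm_checksum_ich8lan() commits it; every other word comes
 * from the active flash bank.  The struct name is assumed from the
 * driver's ich8lan header; the field names match the usage above.
 */
static u16 e1000_nvm_read_model_example(const struct e1000_shadow_ram *shadow,
					const u16 *flash_bank, u16 offset)
{
	return shadow[offset].modified ? shadow[offset].value
				       : flash_bank[offset];
}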
3053 
3054 /**
3055  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3056  *  @hw: pointer to the HW structure
3057  *
3058  *  This function does initial flash setup so that a new read/write/erase cycle
3059  *  can be started.
3060  **/
3061 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3062 {
3063 	union ich8_hws_flash_status hsfsts;
3064 	s32 ret_val = -E1000_ERR_NVM;
3065 
3066 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3067 
3068 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3069 
3070 	/* Check if the flash descriptor is valid */
3071 	if (!hsfsts.hsf_status.fldesvalid) {
3072 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3073 		return -E1000_ERR_NVM;
3074 	}
3075 
3076 	/* Clear FCERR and DAEL in hw status by writing 1 */
3077 	hsfsts.hsf_status.flcerr = 1;
3078 	hsfsts.hsf_status.dael = 1;
3079 
3080 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3081 
3082 	/* Either we should have a hardware SPI cycle in progress
3083 	 * bit to check against, in order to start a new cycle or
3084 	 * FDONE bit should be changed in the hardware so that it
3085 	 * is 1 after hardware reset, which can then be used as an
3086 	 * indication whether a cycle is in progress or has been
3087 	 * completed.
3088 	 */
3089 
3090 	if (!hsfsts.hsf_status.flcinprog) {
3091 		/* There is no cycle running at present,
3092 		 * so we can start a cycle.
3093 		 * Begin by setting Flash Cycle Done.
3094 		 */
3095 		hsfsts.hsf_status.flcdone = 1;
3096 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3097 		ret_val = E1000_SUCCESS;
3098 	} else {
3099 		s32 i;
3100 
3101 		/* Otherwise poll for some time so the current
3102 		 * cycle has a chance to end before giving up.
3103 		 */
3104 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3105 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3106 							      ICH_FLASH_HSFSTS);
3107 			if (!hsfsts.hsf_status.flcinprog) {
3108 				ret_val = E1000_SUCCESS;
3109 				break;
3110 			}
3111 			usec_delay(1);
3112 		}
3113 		if (ret_val == E1000_SUCCESS) {
3114 			/* The previous cycle completed within the timeout;
3115 			 * now set the Flash Cycle Done.
3116 			 */
3117 			hsfsts.hsf_status.flcdone = 1;
3118 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3119 						hsfsts.regval);
3120 		} else {
3121 			DEBUGOUT("Flash controller busy, cannot get access\n");
3122 		}
3123 	}
3124 
3125 	return ret_val;
3126 }
3127 
3128 /**
3129  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3130  *  @hw: pointer to the HW structure
3131  *  @timeout: maximum time to wait for completion
3132  *
3133  *  This function starts a flash cycle and waits for its completion.
3134  **/
3135 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3136 {
3137 	union ich8_hws_flash_ctrl hsflctl;
3138 	union ich8_hws_flash_status hsfsts;
3139 	u32 i = 0;
3140 
3141 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3142 
3143 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3144 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3145 	hsflctl.hsf_ctrl.flcgo = 1;
3146 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3147 
3148 	/* wait till FDONE bit is set to 1 */
3149 	do {
3150 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3151 		if (hsfsts.hsf_status.flcdone)
3152 			break;
3153 		usec_delay(1);
3154 	} while (i++ < timeout);
3155 
3156 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3157 		return E1000_SUCCESS;
3158 
3159 	return -E1000_ERR_NVM;
3160 }
3161 
3162 /**
3163  *  e1000_read_flash_word_ich8lan - Read word from flash
3164  *  @hw: pointer to the HW structure
3165  *  @offset: offset to data location
3166  *  @data: pointer to the location for storing the data
3167  *
3168  *  Reads the flash word at offset into data.  Offset is converted
3169  *  to bytes before read.
3170  **/
3171 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3172 					 u16 *data)
3173 {
3174 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3175 
3176 	if (!data)
3177 		return -E1000_ERR_NVM;
3178 
3179 	/* Must convert offset into bytes. */
3180 	offset <<= 1;
3181 
3182 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3183 }
3184 
3185 /**
3186  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3187  *  @hw: pointer to the HW structure
3188  *  @offset: The offset of the byte to read.
3189  *  @data: Pointer to a byte to store the value read.
3190  *
3191  *  Reads a single byte from the NVM using the flash access registers.
3192  **/
3193 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3194 					 u8 *data)
3195 {
3196 	s32 ret_val;
3197 	u16 word = 0;
3198 
3199 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3200 	if (ret_val)
3201 		return ret_val;
3202 
3203 	*data = (u8)word;
3204 
3205 	return E1000_SUCCESS;
3206 }
3207 
3208 /**
3209  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3210  *  @hw: pointer to the HW structure
3211  *  @offset: The offset (in bytes) of the byte or word to read.
3212  *  @size: Size of data to read, 1=byte 2=word
3213  *  @data: Pointer to the word to store the value read.
3214  *
3215  *  Reads a byte or word from the NVM using the flash access registers.
3216  **/
3217 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3218 					 u8 size, u16 *data)
3219 {
3220 	union ich8_hws_flash_status hsfsts;
3221 	union ich8_hws_flash_ctrl hsflctl;
3222 	u32 flash_linear_addr;
3223 	u32 flash_data = 0;
3224 	s32 ret_val = -E1000_ERR_NVM;
3225 	u8 count = 0;
3226 
3227 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3228 
3229 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3230 		return -E1000_ERR_NVM;
3231 
3232 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3233 			     hw->nvm.flash_base_addr);
3234 
3235 	do {
3236 		usec_delay(1);
3237 		/* Steps */
3238 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3239 		if (ret_val != E1000_SUCCESS)
3240 			break;
3241 
3242 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3243 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3244 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3245 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3246 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3247 
3248 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3249 
3250 		ret_val =
3251 		    e1000_flash_cycle_ich8lan(hw,
3252 					      ICH_FLASH_READ_COMMAND_TIMEOUT);
3253 
3254 		/* On success, read the result out of Flash Data0 (least
3255 		 * significant byte first).  If FCERR was set instead, clear
3256 		 * it and retry the whole sequence up to
3257 		 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3258 		 */
3259 		if (ret_val == E1000_SUCCESS) {
3260 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3261 			if (size == 1)
3262 				*data = (u8)(flash_data & 0x000000FF);
3263 			else if (size == 2)
3264 				*data = (u16)(flash_data & 0x0000FFFF);
3265 			break;
3266 		} else {
3267 			/* If we've gotten here, then things are probably
3268 			 * completely hosed, but if the error condition is
3269 			 * detected, it won't hurt to give it another try...
3270 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3271 			 */
3272 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3273 							      ICH_FLASH_HSFSTS);
3274 			if (hsfsts.hsf_status.flcerr) {
3275 				/* Repeat for some time before giving up. */
3276 				continue;
3277 			} else if (!hsfsts.hsf_status.flcdone) {
3278 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3279 				break;
3280 			}
3281 		}
3282 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3283 
3284 	return ret_val;
3285 }
3286 
3287 /**
3288  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3289  *  @hw: pointer to the HW structure
3290  *  @offset: The word offset of the word(s) to write.
3291  *  @words: Size of data to write in words
3292  *  @data: Pointer to the word(s) to write at offset.
3293  *
3294  *  Writes a byte or word to the NVM using the flash access registers.
3295  **/
3296 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3297 				   u16 *data)
3298 {
3299 	struct e1000_nvm_info *nvm = &hw->nvm;
3300 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3301 	u16 i;
3302 
3303 	DEBUGFUNC("e1000_write_nvm_ich8lan");
3304 
3305 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3306 	    (words == 0)) {
3307 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3308 		return -E1000_ERR_NVM;
3309 	}
3310 
3311 	nvm->ops.acquire(hw);
3312 
3313 	for (i = 0; i < words; i++) {
3314 		dev_spec->shadow_ram[offset+i].modified = TRUE;
3315 		dev_spec->shadow_ram[offset+i].value = data[i];
3316 	}
3317 
3318 	nvm->ops.release(hw);
3319 
3320 	return E1000_SUCCESS;
3321 }
3322 
3323 /**
3324  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3325  *  @hw: pointer to the HW structure
3326  *
3327  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3328  *  which writes the checksum to the shadow ram.  The changes in the shadow
3329  *  ram are then committed to the EEPROM by processing each bank at a time
3330  *  checking for the modified bit and writing only the pending changes.
3331  *  After a successful commit, the shadow ram is cleared and is ready for
3332  *  future writes.
3333  **/
3334 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3335 {
3336 	struct e1000_nvm_info *nvm = &hw->nvm;
3337 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3338 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3339 	s32 ret_val;
3340 	u16 data;
3341 
3342 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3343 
3344 	ret_val = e1000_update_nvm_checksum_generic(hw);
3345 	if (ret_val)
3346 		goto out;
3347 
3348 	if (nvm->type != e1000_nvm_flash_sw)
3349 		goto out;
3350 
3351 	nvm->ops.acquire(hw);
3352 
3353 	/* We're writing to the opposite bank so if we're on bank 1,
3354 	 * write to bank 0 etc.  We also need to erase the segment that
3355 	 * is going to be written
3356 	 */
3357 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3358 	if (ret_val != E1000_SUCCESS) {
3359 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3360 		bank = 0;
3361 	}
3362 
3363 	if (bank == 0) {
3364 		new_bank_offset = nvm->flash_bank_size;
3365 		old_bank_offset = 0;
3366 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3367 		if (ret_val)
3368 			goto release;
3369 	} else {
3370 		old_bank_offset = nvm->flash_bank_size;
3371 		new_bank_offset = 0;
3372 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3373 		if (ret_val)
3374 			goto release;
3375 	}
3376 
3377 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3378 		/* Determine whether to write the value stored
3379 		 * in the other NVM bank or a modified value stored
3380 		 * in the shadow RAM
3381 		 */
3382 		if (dev_spec->shadow_ram[i].modified) {
3383 			data = dev_spec->shadow_ram[i].value;
3384 		} else {
3385 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
3386 								old_bank_offset,
3387 								&data);
3388 			if (ret_val)
3389 				break;
3390 		}
3391 
3392 		/* If the word is at offset 0x13, make sure the signature bits
3393 		 * (15:14) are 11b until the commit has completed.
3394 		 * This will allow us to write 10b which indicates the
3395 		 * signature is valid.  We want to do this after the write
3396 		 * has completed so that we don't mark the segment valid
3397 		 * while the write is still in progress
3398 		 */
3399 		if (i == E1000_ICH_NVM_SIG_WORD)
3400 			data |= E1000_ICH_NVM_SIG_MASK;
3401 
3402 		/* Convert offset to bytes. */
3403 		act_offset = (i + new_bank_offset) << 1;
3404 
3405 		usec_delay(100);
3406 		/* Write the bytes to the new bank. */
3407 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3408 							       act_offset,
3409 							       (u8)data);
3410 		if (ret_val)
3411 			break;
3412 
3413 		usec_delay(100);
3414 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3415 							  act_offset + 1,
3416 							  (u8)(data >> 8));
3417 		if (ret_val)
3418 			break;
3419 	}
3420 
3421 	/* Don't bother writing the segment valid bits if sector
3422 	 * programming failed.
3423 	 */
3424 	if (ret_val) {
3425 		DEBUGOUT("Flash commit failed.\n");
3426 		goto release;
3427 	}
3428 
3429 	/* Finally validate the new segment by setting bits 15:14
3430 	 * to 10b in word 0x13.  This can be done without an
3431 	 * erase as well, since these bits are 11b to start with
3432 	 * and we only need to clear bit 14.
3433 	 */
3434 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3435 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3436 	if (ret_val)
3437 		goto release;
3438 
3439 	data &= 0xBFFF;
3440 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3441 						       act_offset * 2 + 1,
3442 						       (u8)(data >> 8));
3443 	if (ret_val)
3444 		goto release;
3445 
3446 	/* And invalidate the previously valid segment by setting
3447 	 * its signature word (0x13) high byte to 0.  This can be
3448 	 * done without an erase because flash erase sets all bits
3449 	 * to 1's. We can write 1's to 0's without an erase
3450 	 */
3451 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3452 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3453 	if (ret_val)
3454 		goto release;
3455 
3456 	/* Great!  Everything worked, we can now clear the cached entries. */
3457 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3458 		dev_spec->shadow_ram[i].modified = FALSE;
3459 		dev_spec->shadow_ram[i].value = 0xFFFF;
3460 	}
3461 
3462 release:
3463 	nvm->ops.release(hw);
3464 
3465 	/* Reload the EEPROM, or else modifications will not appear
3466 	 * until after the next adapter reset.
3467 	 */
3468 	if (!ret_val) {
3469 		nvm->ops.reload(hw);
3470 		msec_delay(10);
3471 	}
3472 
3473 out:
3474 	if (ret_val)
3475 		DEBUGOUT1("NVM update error: %d\n", ret_val);
3476 
3477 	return ret_val;
3478 }
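
/* Illustrative sketch of the bank signature protocol used above: bits
 * 15:14 of word 0x13 read 11b right after an erase, are written to 10b
 * to validate the freshly committed bank, and the old bank is retired
 * by clearing the high byte of its signature word.  Hypothetical
 * helper, shown disabled; the 0x8000 value follows from the commit
 * path above (bit 15 stays set, bit 14 is cleared).
 */
#if 0
static bool example_bank_signature_valid(u16 sig_word)
{
	/* Valid bank: bits 15:14 == 10b (bit 15 set, bit 14 clear). */
	return (sig_word & E1000_ICH_NVM_SIG_MASK) == 0x8000;
}
#endif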
3479 
3480 /**
3481  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3482  *  @hw: pointer to the HW structure
3483  *
3484  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3485  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
3486  *  If the bit is 0, the EEPROM has been modified, but the checksum was not
3487  **/
3488 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3489 {
3490 	s32 ret_val;
3491 	u16 data;
3492 	u16 word;
3493 	u16 valid_csum_mask;
3494 
3495 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3496 
3497 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3498 	 * the checksum needs to be fixed.  This bit is an indication that
3499 	 * the NVM was prepared by OEM software and did not calculate
3500 	 * the checksum...a likely scenario.
3501 	 */
3502 	switch (hw->mac.type) {
3503 	case e1000_pch_lpt:
3504 		word = NVM_COMPAT;
3505 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3506 		break;
3507 	default:
3508 		word = NVM_FUTURE_INIT_WORD1;
3509 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3510 		break;
3511 	}
3512 
3513 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3514 	if (ret_val)
3515 		return ret_val;
3516 
3517 	if (!(data & valid_csum_mask)) {
3518 		data |= valid_csum_mask;
3519 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3520 		if (ret_val)
3521 			return ret_val;
3522 		ret_val = hw->nvm.ops.update(hw);
3523 		if (ret_val)
3524 			return ret_val;
3525 	}
3526 
3527 	return e1000_validate_nvm_checksum_generic(hw);
3528 }
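
/* Illustrative sketch of the generic rule that
 * e1000_validate_nvm_checksum_generic() enforces: the 16-bit NVM words
 * from offset 0 through the checksum word must sum to NVM_SUM (0xBABA).
 * Hypothetical helper, shown disabled; per-word read errors collapsed.
 */
#if 0
static s32 example_sum_nvm_words(struct e1000_hw *hw)
{
	u16 i, data, checksum = 0;

	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		if (hw->nvm.ops.read(hw, i, 1, &data))
			return -E1000_ERR_NVM;
		checksum += data;
	}

	return (checksum == (u16)NVM_SUM) ? E1000_SUCCESS : -E1000_ERR_NVM;
}
#endif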
3529 
3530 /**
3531  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3532  *  @hw: pointer to the HW structure
3533  *  @offset: The offset (in bytes) of the byte/word to write.
3534  *  @size: Size of data to write, 1=byte 2=word
3535  *  @data: The byte(s) to write to the NVM.
3536  *
3537  *  Writes one/two bytes to the NVM using the flash access registers.
3538  **/
3539 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3540 					  u8 size, u16 data)
3541 {
3542 	union ich8_hws_flash_status hsfsts;
3543 	union ich8_hws_flash_ctrl hsflctl;
3544 	u32 flash_linear_addr;
3545 	u32 flash_data = 0;
3546 	s32 ret_val;
3547 	u8 count = 0;
3548 
3549 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
3550 
3551 	if (size < 1 || size > 2 || data > size * 0xff ||
3552 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
3553 		return -E1000_ERR_NVM;
3554 
3555 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3556 			     hw->nvm.flash_base_addr);
3557 
3558 	do {
3559 		usec_delay(1);
3560 		/* Steps */
3561 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3562 		if (ret_val != E1000_SUCCESS)
3563 			break;
3564 
3565 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3566 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3567 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3568 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3569 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3570 
3571 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3572 
3573 		if (size == 1)
3574 			flash_data = (u32)data & 0x00FF;
3575 		else
3576 			flash_data = (u32)data;
3577 
3578 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3579 
3580 		/* Check if FCERR is set to 1.  If set to 1, clear it
3581 		 * and try the whole sequence a few more times, else done.
3582 		 */
3583 		ret_val =
3584 		    e1000_flash_cycle_ich8lan(hw,
3585 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3586 		if (ret_val == E1000_SUCCESS)
3587 			break;
3588 
3589 		/* If we're here, then things are most likely
3590 		 * completely hosed, but if the error condition
3591 		 * is detected, it won't hurt to give it another
3592 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3593 		 */
3594 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3595 		if (hsfsts.hsf_status.flcerr)
3596 			/* Repeat for some time before giving up. */
3597 			continue;
3598 		if (!hsfsts.hsf_status.flcdone) {
3599 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3600 			break;
3601 		}
3602 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3603 
3604 	return ret_val;
3605 }
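
/* Illustrative sketch: a word-sized counterpart to the byte helper
 * below would simply pass size == 2, selecting a 16-bit flash cycle
 * (fldbcount == 1).  Hypothetical helper, shown disabled; the driver
 * instead commits words as two separate byte writes.
 */
#if 0
static s32 example_write_flash_word(struct e1000_hw *hw, u32 offset,
				    u16 data)
{
	return e1000_write_flash_data_ich8lan(hw, offset, 2, data);
}
#endif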
3606 
3607 /**
3608  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3609  *  @hw: pointer to the HW structure
3610  *  @offset: The index of the byte to write.
3611  *  @data: The byte to write to the NVM.
3612  *
3613  *  Writes a single byte to the NVM using the flash access registers.
3614  **/
3615 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3616 					  u8 data)
3617 {
3618 	u16 word = (u16)data;
3619 
3620 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3621 
3622 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3623 }
3624 
3625 /**
3626  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3627  *  @hw: pointer to the HW structure
3628  *  @offset: The offset of the byte to write.
3629  *  @byte: The byte to write to the NVM.
3630  *
3631  *  Writes a single byte to the NVM using the flash access registers.
3632  *  Goes through a retry algorithm before giving up.
3633  **/
3634 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3635 						u32 offset, u8 byte)
3636 {
3637 	s32 ret_val;
3638 	u16 program_retries;
3639 
3640 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3641 
3642 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3643 	if (!ret_val)
3644 		return ret_val;
3645 
3646 	for (program_retries = 0; program_retries < 100; program_retries++) {
3647 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3648 		usec_delay(100);
3649 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3650 		if (ret_val == E1000_SUCCESS)
3651 			break;
3652 	}
3653 	if (program_retries == 100)
3654 		return -E1000_ERR_NVM;
3655 
3656 	return E1000_SUCCESS;
3657 }
3658 
3659 /**
3660  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3661  *  @hw: pointer to the HW structure
3662  *  @bank: 0 for first bank, 1 for second bank, etc.
3663  *
3664  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3665  *  bank N is 4096 * N + flash_reg_addr.
3666  **/
3667 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3668 {
3669 	struct e1000_nvm_info *nvm = &hw->nvm;
3670 	union ich8_hws_flash_status hsfsts;
3671 	union ich8_hws_flash_ctrl hsflctl;
3672 	u32 flash_linear_addr;
3673 	/* bank size is in 16-bit words - adjust to bytes */
3674 	u32 flash_bank_size = nvm->flash_bank_size * 2;
3675 	s32 ret_val;
3676 	s32 count = 0;
3677 	s32 j, iteration, sector_size;
3678 
3679 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3680 
3681 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3682 
3683 	/* Determine HW Sector size: Read BERASE bits of hw flash status
3684 	 * register
3685 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3686 	 *     consecutive sectors.  The start index for the nth Hw sector
3687 	 *     can be calculated as = bank * 4096 + n * 256
3688 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3689 	 *     The start index for the nth Hw sector can be calculated
3690 	 *     as = bank * 4096
3691 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3692 	 *     (ich9 only, otherwise error condition)
3693 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3694 	 */
3695 	switch (hsfsts.hsf_status.berasesz) {
3696 	case 0:
3697 		/* Hw sector size 256 */
3698 		sector_size = ICH_FLASH_SEG_SIZE_256;
3699 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3700 		break;
3701 	case 1:
3702 		sector_size = ICH_FLASH_SEG_SIZE_4K;
3703 		iteration = 1;
3704 		break;
3705 	case 2:
3706 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3707 		iteration = 1;
3708 		break;
3709 	case 3:
3710 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3711 		iteration = 1;
3712 		break;
3713 	default:
3714 		return -E1000_ERR_NVM;
3715 	}
3716 
3717 	/* Start with the base address, then add the sector offset. */
3718 	flash_linear_addr = hw->nvm.flash_base_addr;
3719 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3720 
3721 	for (j = 0; j < iteration; j++) {
3722 		do {
3723 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3724 
3725 			/* Steps */
3726 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3727 			if (ret_val)
3728 				return ret_val;
3729 
3730 			/* Write a value 11 (block Erase) in Flash
3731 			 * Cycle field in hw flash control
3732 			 */
3733 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3734 							      ICH_FLASH_HSFCTL);
3735 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3736 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3737 						hsflctl.regval);
3738 
3739 			/* Write the last 24 bits of an index within the
3740 			 * block into Flash Linear address field in Flash
3741 			 * Address.
3742 			 */
3743 			flash_linear_addr += (j * sector_size);
3744 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3745 					      flash_linear_addr);
3746 
3747 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3748 			if (ret_val == E1000_SUCCESS)
3749 				break;
3750 
3751 			/* Check if FCERR is set to 1.  If 1,
3752 			 * clear it and try the whole sequence
3753 			 * a few more times else Done
3754 			 */
3755 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3756 						      ICH_FLASH_HSFSTS);
3757 			if (hsfsts.hsf_status.flcerr)
3758 				/* repeat for some time before giving up */
3759 				continue;
3760 			else if (!hsfsts.hsf_status.flcdone)
3761 				return ret_val;
3762 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3763 	}
3764 
3765 	return E1000_SUCCESS;
3766 }
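
/* Illustrative arithmetic for the BERASE cases above, assuming a 4 KiB
 * bank: a 256-byte sector size needs 4096 / 256 = 16 erase cycles at
 * bank * 4096 + n * 256, while the 4K/8K/64K sector sizes cover the
 * bank in a single cycle.  Hypothetical helper, shown disabled.
 */
#if 0
static void example_erase_plan_256(u32 bank_bytes)
{
	u32 n;

	for (n = 0; n < bank_bytes / ICH_FLASH_SEG_SIZE_256; n++)
		DEBUGOUT2("erase cycle %u at byte offset 0x%x\n",
			  n, n * ICH_FLASH_SEG_SIZE_256);
}
#endif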
3767 
3768 /**
3769  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3770  *  @hw: pointer to the HW structure
3771  *  @data: Pointer to the LED settings
3772  *
3773  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3774  *  setting is all 0's or F's, set the LED default to a valid LED default
3775  *  setting.
3776  **/
3777 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3778 {
3779 	s32 ret_val;
3780 
3781 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3782 
3783 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3784 	if (ret_val) {
3785 		DEBUGOUT("NVM Read Error\n");
3786 		return ret_val;
3787 	}
3788 
3789 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3790 		*data = ID_LED_DEFAULT_ICH8LAN;
3791 
3792 	return E1000_SUCCESS;
3793 }
3794 
3795 /**
3796  *  e1000_id_led_init_pchlan - store LED configurations
3797  *  @hw: pointer to the HW structure
3798  *
3799  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3800  *  the PHY LED configuration register.
3801  *
3802  *  PCH also does not have an "always on" or "always off" mode which
3803  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3804  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3805  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3806  *  link based on logic in e1000_led_[on|off]_pchlan().
3807  **/
3808 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3809 {
3810 	struct e1000_mac_info *mac = &hw->mac;
3811 	s32 ret_val;
3812 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3813 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3814 	u16 data, i, temp, shift;
3815 
3816 	DEBUGFUNC("e1000_id_led_init_pchlan");
3817 
3818 	/* Get default ID LED modes */
3819 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3820 	if (ret_val)
3821 		return ret_val;
3822 
3823 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3824 	mac->ledctl_mode1 = mac->ledctl_default;
3825 	mac->ledctl_mode2 = mac->ledctl_default;
3826 
3827 	for (i = 0; i < 4; i++) {
3828 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3829 		shift = (i * 5);
3830 		switch (temp) {
3831 		case ID_LED_ON1_DEF2:
3832 		case ID_LED_ON1_ON2:
3833 		case ID_LED_ON1_OFF2:
3834 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3835 			mac->ledctl_mode1 |= (ledctl_on << shift);
3836 			break;
3837 		case ID_LED_OFF1_DEF2:
3838 		case ID_LED_OFF1_ON2:
3839 		case ID_LED_OFF1_OFF2:
3840 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3841 			mac->ledctl_mode1 |= (ledctl_off << shift);
3842 			break;
3843 		default:
3844 			/* Do nothing */
3845 			break;
3846 		}
3847 		switch (temp) {
3848 		case ID_LED_DEF1_ON2:
3849 		case ID_LED_ON1_ON2:
3850 		case ID_LED_OFF1_ON2:
3851 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3852 			mac->ledctl_mode2 |= (ledctl_on << shift);
3853 			break;
3854 		case ID_LED_DEF1_OFF2:
3855 		case ID_LED_ON1_OFF2:
3856 		case ID_LED_OFF1_OFF2:
3857 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3858 			mac->ledctl_mode2 |= (ledctl_off << shift);
3859 			break;
3860 		default:
3861 			/* Do nothing */
3862 			break;
3863 		}
3864 	}
3865 
3866 	return E1000_SUCCESS;
3867 }
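
/* Illustrative sketch of the two packings bridged above: the NVM ID
 * LED word carries a 4-bit mode nibble per LED (hence the i << 2
 * shift), while the PHY LED configuration packs a 5-bit field of mode
 * plus invert bit per LED (hence shift = i * 5).  Hypothetical helper,
 * shown disabled.
 */
#if 0
static void example_dump_led_fields(u16 nvm_data, u32 ledctl)
{
	u16 i;

	for (i = 0; i < 4; i++)
		DEBUGOUT2("LED nibble %x -> PHY field %x\n",
			  (nvm_data >> (i << 2)) & 0xF,
			  (ledctl >> (i * 5)) & E1000_PHY_LED0_MASK);
}
#endif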
3868 
3869 /**
3870  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3871  *  @hw: pointer to the HW structure
3872  *
3873  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3874  *  register, so the bus width is hard coded.
3875  **/
3876 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3877 {
3878 	struct e1000_bus_info *bus = &hw->bus;
3879 	s32 ret_val;
3880 
3881 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3882 
3883 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3884 
3885 	/* ICH devices are "PCI Express"-ish.  They have
3886 	 * a configuration space, but do not contain
3887 	 * PCI Express Capability registers, so bus width
3888 	 * must be hardcoded.
3889 	 */
3890 	if (bus->width == e1000_bus_width_unknown)
3891 		bus->width = e1000_bus_width_pcie_x1;
3892 
3893 	return ret_val;
3894 }
3895 
3896 /**
3897  *  e1000_reset_hw_ich8lan - Reset the hardware
3898  *  @hw: pointer to the HW structure
3899  *
3900  *  Does a full reset of the hardware which includes a reset of the PHY and
3901  *  MAC.
3902  **/
3903 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3904 {
3905 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3906 	u16 kum_cfg;
3907 	u32 ctrl, reg;
3908 	s32 ret_val;
3909 
3910 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3911 
3912 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
3913 	 * on the last TLP read/write transaction when MAC is reset.
3914 	 */
3915 	ret_val = e1000_disable_pcie_master_generic(hw);
3916 	if (ret_val)
3917 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3918 
3919 	DEBUGOUT("Masking off all interrupts\n");
3920 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3921 
3922 	/* Disable the Transmit and Receive units.  Then delay to allow
3923 	 * any pending transactions to complete before we hit the MAC
3924 	 * with the global reset.
3925 	 */
3926 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3927 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3928 	E1000_WRITE_FLUSH(hw);
3929 
3930 	msec_delay(10);
3931 
3932 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3933 	if (hw->mac.type == e1000_ich8lan) {
3934 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3935 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3936 		/* Set Packet Buffer Size to 16k. */
3937 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3938 	}
3939 
3940 	if (hw->mac.type == e1000_pchlan) {
3941 		/* Save the NVM K1 bit setting */
3942 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3943 		if (ret_val)
3944 			return ret_val;
3945 
3946 		if (kum_cfg & E1000_NVM_K1_ENABLE)
3947 			dev_spec->nvm_k1_enabled = TRUE;
3948 		else
3949 			dev_spec->nvm_k1_enabled = FALSE;
3950 	}
3951 
3952 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3953 
3954 	if (!hw->phy.ops.check_reset_block(hw)) {
3955 		/* Full-chip reset requires MAC and PHY reset at the same
3956 		 * time to make sure the interface between MAC and the
3957 		 * external PHY is reset.
3958 		 */
3959 		ctrl |= E1000_CTRL_PHY_RST;
3960 
3961 		/* Gate automatic PHY configuration by hardware on
3962 		 * non-managed 82579
3963 		 */
3964 		if ((hw->mac.type == e1000_pch2lan) &&
3965 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3966 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3967 	}
3968 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3969 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3970 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3971 	/* cannot issue a flush here because it hangs the hardware */
3972 	msec_delay(20);
3973 
3974 	/* Set Phy Config Counter to 50msec */
3975 	if (hw->mac.type == e1000_pch2lan) {
3976 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3977 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3978 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3979 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
3980 	}
3981 
3982 	if (!ret_val)
3983 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
3984 
3985 	if (ctrl & E1000_CTRL_PHY_RST) {
3986 		ret_val = hw->phy.ops.get_cfg_done(hw);
3987 		if (ret_val)
3988 			return ret_val;
3989 
3990 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3991 		if (ret_val)
3992 			return ret_val;
3993 	}
3994 
3995 	/* For PCH, this write will make sure that any noise
3996 	 * will be detected as a CRC error and be dropped rather than show up
3997 	 * as a bad packet to the DMA engine.
3998 	 */
3999 	if (hw->mac.type == e1000_pchlan)
4000 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4001 
4002 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4003 	E1000_READ_REG(hw, E1000_ICR);
4004 
4005 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
4006 	reg |= E1000_KABGTXD_BGSQLBIAS;
4007 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4008 
4009 	return E1000_SUCCESS;
4010 }
4011 
4012 /**
4013  *  e1000_init_hw_ich8lan - Initialize the hardware
4014  *  @hw: pointer to the HW structure
4015  *
4016  *  Prepares the hardware for transmit and receive by doing the following:
4017  *   - initialize hardware bits
4018  *   - initialize LED identification
4019  *   - setup receive address registers
4020  *   - setup flow control
4021  *   - setup transmit descriptors
4022  *   - clear statistics
4023  **/
4024 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4025 {
4026 	struct e1000_mac_info *mac = &hw->mac;
4027 	u32 ctrl_ext, txdctl, snoop;
4028 	s32 ret_val;
4029 	u16 i;
4030 
4031 	DEBUGFUNC("e1000_init_hw_ich8lan");
4032 
4033 	e1000_initialize_hw_bits_ich8lan(hw);
4034 
4035 	/* Initialize identification LED */
4036 	ret_val = mac->ops.id_led_init(hw);
4037 	/* An error is not fatal and we should not stop init due to this */
4038 	if (ret_val)
4039 		DEBUGOUT("Error initializing identification LED\n");
4040 
4041 	/* Setup the receive address. */
4042 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4043 
4044 	/* Zero out the Multicast HASH table */
4045 	DEBUGOUT("Zeroing the MTA\n");
4046 	for (i = 0; i < mac->mta_reg_count; i++)
4047 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4048 
4049 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
4050 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
4051 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4052 	 */
4053 	if (hw->phy.type == e1000_phy_82578) {
4054 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4055 		i &= ~BM_WUC_HOST_WU_BIT;
4056 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4057 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
4058 		if (ret_val)
4059 			return ret_val;
4060 	}
4061 
4062 	/* Setup link and flow control */
4063 	ret_val = mac->ops.setup_link(hw);
4064 
4065 	/* Set the transmit descriptor write-back policy for both queues */
4066 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4067 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4068 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4069 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4070 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4071 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4072 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4073 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4074 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4075 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4076 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4077 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4078 
4079 	/* ICH8 has opposite polarity of no_snoop bits.
4080 	 * By default, we should use snoop behavior.
4081 	 */
4082 	if (mac->type == e1000_ich8lan)
4083 		snoop = PCIE_ICH8_SNOOP_ALL;
4084 	else
4085 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4086 	e1000_set_pcie_no_snoop_generic(hw, snoop);
4087 
4088 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4089 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4090 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4091 
4092 	/* Clear all of the statistics registers (clear on read).  It is
4093 	 * important that we do this after we have tried to establish link
4094 	 * because the symbol error count will increment wildly if there
4095 	 * is no link.
4096 	 */
4097 	e1000_clear_hw_cntrs_ich8lan(hw);
4098 
4099 	return ret_val;
4100 }
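
/* Illustrative refactoring sketch of the duplicated TXDCTL writes
 * above, shown only to make explicit that both transmit queues receive
 * the identical write-back and prefetch policy.  Hypothetical helper,
 * shown disabled.
 */
#if 0
static void example_txdctl_policy(struct e1000_hw *hw)
{
	u32 q, txdctl;

	for (q = 0; q < 2; q++) {
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(q));
		txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
			 E1000_TXDCTL_FULL_TX_DESC_WB;
		txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
			 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
		E1000_WRITE_REG(hw, E1000_TXDCTL(q), txdctl);
	}
}
#endif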
4101 
4102 /**
4103  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4104  *  @hw: pointer to the HW structure
4105  *
4106  *  Sets/Clears required hardware bits necessary for correctly setting up the
4107  *  hardware for transmit and receive.
4108  **/
4109 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4110 {
4111 	u32 reg;
4112 
4113 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4114 
4115 	/* Extended Device Control */
4116 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4117 	reg |= (1 << 22);
4118 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4119 	if (hw->mac.type >= e1000_pchlan)
4120 		reg |= E1000_CTRL_EXT_PHYPDEN;
4121 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4122 
4123 	/* Transmit Descriptor Control 0 */
4124 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4125 	reg |= (1 << 22);
4126 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4127 
4128 	/* Transmit Descriptor Control 1 */
4129 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4130 	reg |= (1 << 22);
4131 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4132 
4133 	/* Transmit Arbitration Control 0 */
4134 	reg = E1000_READ_REG(hw, E1000_TARC(0));
4135 	if (hw->mac.type == e1000_ich8lan)
4136 		reg |= (1 << 28) | (1 << 29);
4137 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4138 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4139 
4140 	/* Transmit Arbitration Control 1 */
4141 	reg = E1000_READ_REG(hw, E1000_TARC(1));
4142 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4143 		reg &= ~(1 << 28);
4144 	else
4145 		reg |= (1 << 28);
4146 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
4147 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4148 
4149 	/* Device Status */
4150 	if (hw->mac.type == e1000_ich8lan) {
4151 		reg = E1000_READ_REG(hw, E1000_STATUS);
4152 		reg &= ~(1U << 31);
4153 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
4154 	}
4155 
4156 	/* Work around a descriptor data corruption issue during NFSv2 UDP
4157 	 * traffic by disabling the NFS filtering capability.
4158 	 */
4159 	reg = E1000_READ_REG(hw, E1000_RFCTL);
4160 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4161 
4162 	/* Disable IPv6 extension header parsing because some malformed
4163 	 * IPv6 headers can hang the Rx.
4164 	 */
4165 	if (hw->mac.type == e1000_ich8lan)
4166 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4167 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4168 
4169 	/* Enable ECC on Lynxpoint */
4170 	if (hw->mac.type == e1000_pch_lpt) {
4171 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4172 		reg |= E1000_PBECCSTS_ECC_ENABLE;
4173 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4174 
4175 		reg = E1000_READ_REG(hw, E1000_CTRL);
4176 		reg |= E1000_CTRL_MEHE;
4177 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
4178 	}
4179 
4180 	return;
4181 }
4182 
4183 /**
4184  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4185  *  @hw: pointer to the HW structure
4186  *
4187  *  Determines which flow control settings to use, then configures flow
4188  *  control.  Calls the appropriate media-specific link configuration
4189  *  function.  Assuming the adapter has a valid link partner, a valid link
4190  *  should be established.  Assumes the hardware has previously been reset
4191  *  and the transmitter and receiver are not enabled.
4192  **/
4193 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4194 {
4195 	s32 ret_val;
4196 
4197 	DEBUGFUNC("e1000_setup_link_ich8lan");
4198 
4199 	if (hw->phy.ops.check_reset_block(hw))
4200 		return E1000_SUCCESS;
4201 
4202 	/* ICH parts do not have a word in the NVM to determine
4203 	 * the default flow control setting, so we explicitly
4204 	 * set it to full.
4205 	 */
4206 	if (hw->fc.requested_mode == e1000_fc_default)
4207 		hw->fc.requested_mode = e1000_fc_full;
4208 
4209 	/* Save off the requested flow control mode for use later.  Depending
4210 	 * on the link partner's capabilities, we may or may not use this mode.
4211 	 */
4212 	hw->fc.current_mode = hw->fc.requested_mode;
4213 
4214 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4215 		hw->fc.current_mode);
4216 
4217 	/* Continue to configure the copper link. */
4218 	ret_val = hw->mac.ops.setup_physical_interface(hw);
4219 	if (ret_val)
4220 		return ret_val;
4221 
4222 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4223 	if ((hw->phy.type == e1000_phy_82578) ||
4224 	    (hw->phy.type == e1000_phy_82579) ||
4225 	    (hw->phy.type == e1000_phy_i217) ||
4226 	    (hw->phy.type == e1000_phy_82577)) {
4227 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4228 
4229 		ret_val = hw->phy.ops.write_reg(hw,
4230 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
4231 					     hw->fc.pause_time);
4232 		if (ret_val)
4233 			return ret_val;
4234 	}
4235 
4236 	return e1000_set_fc_watermarks_generic(hw);
4237 }
4238 
4239 /**
4240  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4241  *  @hw: pointer to the HW structure
4242  *
4243  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4244  *  when polling the PHY, then calls the generic setup_copper_link to finish
4245  *  configuring the copper link.
4246  **/
4247 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4248 {
4249 	u32 ctrl;
4250 	s32 ret_val;
4251 	u16 reg_data;
4252 
4253 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4254 
4255 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4256 	ctrl |= E1000_CTRL_SLU;
4257 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4258 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4259 
4260 	/* Set the mac to wait the maximum time between each iteration
4261 	 * and increase the max iterations when polling the phy;
4262 	 * this fixes erroneous timeouts at 10Mbps.
4263 	 */
4264 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4265 					       0xFFFF);
4266 	if (ret_val)
4267 		return ret_val;
4268 	ret_val = e1000_read_kmrn_reg_generic(hw,
4269 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
4270 					      &reg_data);
4271 	if (ret_val)
4272 		return ret_val;
4273 	reg_data |= 0x3F;
4274 	ret_val = e1000_write_kmrn_reg_generic(hw,
4275 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
4276 					       reg_data);
4277 	if (ret_val)
4278 		return ret_val;
4279 
4280 	switch (hw->phy.type) {
4281 	case e1000_phy_igp_3:
4282 		ret_val = e1000_copper_link_setup_igp(hw);
4283 		if (ret_val)
4284 			return ret_val;
4285 		break;
4286 	case e1000_phy_bm:
4287 	case e1000_phy_82578:
4288 		ret_val = e1000_copper_link_setup_m88(hw);
4289 		if (ret_val)
4290 			return ret_val;
4291 		break;
4292 	case e1000_phy_82577:
4293 	case e1000_phy_82579:
4294 		ret_val = e1000_copper_link_setup_82577(hw);
4295 		if (ret_val)
4296 			return ret_val;
4297 		break;
4298 	case e1000_phy_ife:
4299 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4300 					       &reg_data);
4301 		if (ret_val)
4302 			return ret_val;
4303 
4304 		reg_data &= ~IFE_PMC_AUTO_MDIX;
4305 
4306 		switch (hw->phy.mdix) {
4307 		case 1:
4308 			reg_data &= ~IFE_PMC_FORCE_MDIX;
4309 			break;
4310 		case 2:
4311 			reg_data |= IFE_PMC_FORCE_MDIX;
4312 			break;
4313 		case 0:
4314 		default:
4315 			reg_data |= IFE_PMC_AUTO_MDIX;
4316 			break;
4317 		}
4318 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4319 						reg_data);
4320 		if (ret_val)
4321 			return ret_val;
4322 		break;
4323 	default:
4324 		break;
4325 	}
4326 
4327 	return e1000_setup_copper_link_generic(hw);
4328 }
4329 
4330 /**
4331  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4332  *  @hw: pointer to the HW structure
4333  *
4334  *  Calls the PHY specific link setup function and then calls the
4335  *  generic setup_copper_link to finish configuring the link for
4336  *  Lynxpoint PCH devices
4337  **/
4338 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4339 {
4340 	u32 ctrl;
4341 	s32 ret_val;
4342 
4343 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4344 
4345 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4346 	ctrl |= E1000_CTRL_SLU;
4347 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4348 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4349 
4350 	ret_val = e1000_copper_link_setup_82577(hw);
4351 	if (ret_val)
4352 		return ret_val;
4353 
4354 	return e1000_setup_copper_link_generic(hw);
4355 }
4356 
4357 /**
4358  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4359  *  @hw: pointer to the HW structure
4360  *  @speed: pointer to store current link speed
4361  *  @duplex: pointer to store the current link duplex
4362  *
4363  *  Calls the generic get_speed_and_duplex to retrieve the current link
4364  *  information and then calls the Kumeran lock loss workaround for links at
4365  *  gigabit speeds.
4366  **/
4367 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4368 					  u16 *duplex)
4369 {
4370 	s32 ret_val;
4371 
4372 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4373 
4374 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4375 	if (ret_val)
4376 		return ret_val;
4377 
4378 	if ((hw->mac.type == e1000_ich8lan) &&
4379 	    (hw->phy.type == e1000_phy_igp_3) &&
4380 	    (*speed == SPEED_1000)) {
4381 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4382 	}
4383 
4384 	return ret_val;
4385 }
4386 
4387 /**
4388  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4389  *  @hw: pointer to the HW structure
4390  *
4391  *  Work-around for 82566 Kumeran PCS lock loss:
4392  *  On link status change (i.e. PCI reset, speed change) when link is up
4393  *  at gigabit speed:
4394  *    0) if workaround is optionally disabled do nothing
4395  *    1) wait 1ms for Kumeran link to come up
4396  *    2) check Kumeran Diagnostic register PCS lock loss bit
4397  *    3) if not set the link is locked (all is good), otherwise...
4398  *    4) reset the PHY
4399  *    5) repeat up to 10 times
4400  *  Note: this is only called for IGP3 copper when speed is 1gb.
4401  **/
4402 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4403 {
4404 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4405 	u32 phy_ctrl;
4406 	s32 ret_val;
4407 	u16 i, data;
4408 	bool link;
4409 
4410 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4411 
4412 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4413 		return E1000_SUCCESS;
4414 
4415 	/* Make sure link is up before proceeding.  If not, just return.
4416 	 * Attempting this while link is negotiating fouls up link
4417 	 * stability.
4418 	 */
4419 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4420 	if (!link)
4421 		return E1000_SUCCESS;
4422 
4423 	for (i = 0; i < 10; i++) {
4424 		/* read once to clear */
4425 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4426 		if (ret_val)
4427 			return ret_val;
4428 		/* and again to get new status */
4429 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4430 		if (ret_val)
4431 			return ret_val;
4432 
4433 		/* check for PCS lock */
4434 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4435 			return E1000_SUCCESS;
4436 
4437 		/* Issue PHY reset */
4438 		hw->phy.ops.reset(hw);
4439 		msec_delay_irq(5);
4440 	}
4441 	/* Disable GigE link negotiation */
4442 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4443 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4444 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4445 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4446 
4447 	/* Call gig speed drop workaround on Gig disable before accessing
4448 	 * any PHY registers
4449 	 */
4450 	e1000_gig_downshift_workaround_ich8lan(hw);
4451 
4452 	/* unable to acquire PCS lock */
4453 	return -E1000_ERR_PHY;
4454 }
4455 
4456 /**
4457  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4458  *  @hw: pointer to the HW structure
4459  *  @state: boolean value used to set the current Kumeran workaround state
4460  *
4461  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
4462  *  /disabled - FALSE).
4463  **/
4464 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4465 						 bool state)
4466 {
4467 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4468 
4469 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4470 
4471 	if (hw->mac.type != e1000_ich8lan) {
4472 		DEBUGOUT("Workaround applies to ICH8 only.\n");
4473 		return;
4474 	}
4475 
4476 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
4477 
4478 	return;
4479 }
4480 
4481 /**
4482  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4483  *  @hw: pointer to the HW structure
4484  *
4485  *  Workaround for 82566 power-down on D3 entry:
4486  *    1) disable gigabit link
4487  *    2) write VR power-down enable
4488  *    3) read it back
4489  *  Continue if successful, else issue LCD reset and repeat
4490  **/
4491 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4492 {
4493 	u32 reg;
4494 	u16 data;
4495 	u8  retry = 0;
4496 
4497 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4498 
4499 	if (hw->phy.type != e1000_phy_igp_3)
4500 		return;
4501 
4502 	/* Try the workaround twice (if needed) */
4503 	do {
4504 		/* Disable link */
4505 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4506 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4507 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4508 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4509 
4510 		/* Call gig speed drop workaround on Gig disable before
4511 		 * accessing any PHY registers
4512 		 */
4513 		if (hw->mac.type == e1000_ich8lan)
4514 			e1000_gig_downshift_workaround_ich8lan(hw);
4515 
4516 		/* Write VR power-down enable */
4517 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4518 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4519 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4520 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4521 
4522 		/* Read it back and test */
4523 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4524 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4525 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4526 			break;
4527 
4528 		/* Issue PHY reset and repeat at most one more time */
4529 		reg = E1000_READ_REG(hw, E1000_CTRL);
4530 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4531 		retry++;
4532 	} while (retry);
4533 }
4534 
4535 /**
4536  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4537  *  @hw: pointer to the HW structure
4538  *
4539  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4540  *  LPLU, Gig disable, MDIC PHY reset):
4541  *    1) Set Kumeran Near-end loopback
4542  *    2) Clear Kumeran Near-end loopback
4543  *  Should only be called for ICH8[m] devices with any 1G Phy.
4544  **/
4545 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4546 {
4547 	s32 ret_val;
4548 	u16 reg_data;
4549 
4550 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4551 
4552 	if ((hw->mac.type != e1000_ich8lan) ||
4553 	    (hw->phy.type == e1000_phy_ife))
4554 		return;
4555 
4556 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4557 					      &reg_data);
4558 	if (ret_val)
4559 		return;
4560 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4561 	ret_val = e1000_write_kmrn_reg_generic(hw,
4562 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
4563 					       reg_data);
4564 	if (ret_val)
4565 		return;
4566 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4567 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4568 				     reg_data);
4569 }
4570 
4571 /**
4572  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4573  *  @hw: pointer to the HW structure
4574  *
4575  *  During S0 to Sx transition, it is possible the link remains at gig
4576  *  instead of negotiating to a lower speed.  Before going to Sx, set
4577  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4578  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4579  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4580  *  needs to be written.
4581  *  Parts that support (and are linked to a partner which support) EEE in
4582  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4583  *  than 10Mbps w/o EEE.
4584  **/
4585 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4586 {
4587 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4588 	u32 phy_ctrl;
4589 	s32 ret_val;
4590 
4591 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4592 
4593 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4594 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4595 
4596 	if (hw->phy.type == e1000_phy_i217) {
4597 		u16 phy_reg, device_id = hw->device_id;
4598 
4599 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4600 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4601 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4602 
4603 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4604 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4605 		}
4606 
4607 		ret_val = hw->phy.ops.acquire(hw);
4608 		if (ret_val)
4609 			goto out;
4610 
4611 		if (!dev_spec->eee_disable) {
4612 			u16 eee_advert;
4613 
4614 			ret_val =
4615 			    e1000_read_emi_reg_locked(hw,
4616 						      I217_EEE_ADVERTISEMENT,
4617 						      &eee_advert);
4618 			if (ret_val)
4619 				goto release;
4620 
4621 			/* Disable LPLU if both link partners support 100BaseT
4622 			 * EEE and 100Full is advertised on both ends of the
4623 			 * link, and enable Auto Enable LPI since there will
4624 			 * be no driver to enable LPI while in Sx.
4625 			 */
4626 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4627 			    (dev_spec->eee_lp_ability &
4628 			     I82579_EEE_100_SUPPORTED) &&
4629 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4630 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4631 					      E1000_PHY_CTRL_NOND0A_LPLU);
4632 
4633 				/* Set Auto Enable LPI after link up */
4634 				hw->phy.ops.read_reg_locked(hw,
4635 							    I217_LPI_GPIO_CTRL,
4636 							    &phy_reg);
4637 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4638 				hw->phy.ops.write_reg_locked(hw,
4639 							     I217_LPI_GPIO_CTRL,
4640 							     phy_reg);
4641 			}
4642 		}
4643 
4644 		/* For i217 Intel Rapid Start Technology support,
4645 		 * when the system is going into Sx and no manageability engine
4646 		 * is present, the driver must configure proxy to reset only on
4647 		 * power good.  LPI (Low Power Idle) state must also reset only
4648 		 * on power good, as well as the MTA (Multicast table array).
4649 		 * The SMBus release must also be disabled on LCD reset.
4650 		 */
4651 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4652 			E1000_ICH_FWSM_FW_VALID)) {
4653 			/* Enable proxy to reset only on power good. */
4654 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4655 						    &phy_reg);
4656 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4657 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4658 						     phy_reg);
4659 
4660 			/* Set bit enable LPI (EEE) to reset only on
4661 			 * power good.
4662 			 */
4663 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4664 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4665 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4666 
4667 			/* Disable the SMB release on LCD reset. */
4668 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4669 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4670 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4671 		}
4672 
4673 		/* Enable MTA to reset for Intel Rapid Start Technology
4674 		 * Support
4675 		 */
4676 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4677 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4678 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4679 
4680 release:
4681 		hw->phy.ops.release(hw);
4682 	}
4683 out:
4684 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4685 
4686 	if (hw->mac.type == e1000_ich8lan)
4687 		e1000_gig_downshift_workaround_ich8lan(hw);
4688 
4689 	if (hw->mac.type >= e1000_pchlan) {
4690 		e1000_oem_bits_config_ich8lan(hw, FALSE);
4691 
4692 		/* Reset PHY to activate OEM bits on 82577/8 */
4693 		if (hw->mac.type == e1000_pchlan)
4694 			e1000_phy_hw_reset_generic(hw);
4695 
4696 		ret_val = hw->phy.ops.acquire(hw);
4697 		if (ret_val)
4698 			return;
4699 		e1000_write_smbus_addr(hw);
4700 		hw->phy.ops.release(hw);
4701 	}
4702 
4703 	return;
4704 }
4705 
4706 /**
4707  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4708  *  @hw: pointer to the HW structure
4709  *
4710  *  During Sx to S0 transitions on non-managed devices or managed devices
4711  *  on which PHY resets are not blocked, if the PHY registers cannot be
4712  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4713  *  the PHY.
4714  *  On i217, setup Intel Rapid Start Technology.
4715  **/
4716 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4717 {
4718 	s32 ret_val;
4719 
4720 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
4721 
4722 	if (hw->mac.type < e1000_pch2lan)
4723 		return;
4724 
4725 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
4726 	if (ret_val) {
4727 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4728 		return;
4729 	}
4730 
4731 	/* For i217 Intel Rapid Start Technology support when the system
4732 	 * is transitioning from Sx and no manageability engine is present
4733 	 * configure SMBus to restore on reset, disable proxy, and enable
4734 	 * the reset on MTA (Multicast table array).
4735 	 */
4736 	if (hw->phy.type == e1000_phy_i217) {
4737 		u16 phy_reg;
4738 
4739 		ret_val = hw->phy.ops.acquire(hw);
4740 		if (ret_val) {
4741 			DEBUGOUT("Failed to setup iRST\n");
4742 			return;
4743 		}
4744 
4745 		/* Clear Auto Enable LPI after link up */
4746 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
4747 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4748 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
4749 
4750 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4751 		    E1000_ICH_FWSM_FW_VALID)) {
4752 			/* Restore clear on SMB if no manageability engine
4753 			 * is present
4754 			 */
4755 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4756 							      &phy_reg);
4757 			if (ret_val)
4758 				goto release;
4759 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4760 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4761 
4762 			/* Disable Proxy */
4763 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4764 		}
4765 		/* Enable reset on MTA */
4766 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4767 						      &phy_reg);
4768 		if (ret_val)
4769 			goto release;
4770 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4771 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4772 release:
4773 		if (ret_val)
4774 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4775 		hw->phy.ops.release(hw);
4776 	}
4777 }
4778 
4779 /**
4780  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4781  *  @hw: pointer to the HW structure
4782  *
4783  *  Return the LED back to the default configuration.
4784  **/
4785 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4786 {
4787 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
4788 
4789 	if (hw->phy.type == e1000_phy_ife)
4790 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4791 					     0);
4792 
4793 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4794 	return E1000_SUCCESS;
4795 }
4796 
4797 /**
4798  *  e1000_led_on_ich8lan - Turn LEDs on
4799  *  @hw: pointer to the HW structure
4800  *
4801  *  Turn on the LEDs.
4802  **/
4803 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4804 {
4805 	DEBUGFUNC("e1000_led_on_ich8lan");
4806 
4807 	if (hw->phy.type == e1000_phy_ife)
4808 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4809 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4810 
4811 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4812 	return E1000_SUCCESS;
4813 }
4814 
4815 /**
4816  *  e1000_led_off_ich8lan - Turn LEDs off
4817  *  @hw: pointer to the HW structure
4818  *
4819  *  Turn off the LEDs.
4820  **/
4821 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4822 {
4823 	DEBUGFUNC("e1000_led_off_ich8lan");
4824 
4825 	if (hw->phy.type == e1000_phy_ife)
4826 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4827 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4828 
4829 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4830 	return E1000_SUCCESS;
4831 }
4832 
4833 /**
4834  *  e1000_setup_led_pchlan - Configures SW controllable LED
4835  *  @hw: pointer to the HW structure
4836  *
4837  *  This prepares the SW controllable LED for use.
4838  **/
4839 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4840 {
4841 	DEBUGFUNC("e1000_setup_led_pchlan");
4842 
4843 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4844 				     (u16)hw->mac.ledctl_mode1);
4845 }
4846 
4847 /**
4848  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4849  *  @hw: pointer to the HW structure
4850  *
4851  *  Return the LED back to the default configuration.
4852  **/
4853 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4854 {
4855 	DEBUGFUNC("e1000_cleanup_led_pchlan");
4856 
4857 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4858 				     (u16)hw->mac.ledctl_default);
4859 }
4860 
4861 /**
4862  *  e1000_led_on_pchlan - Turn LEDs on
4863  *  @hw: pointer to the HW structure
4864  *
4865  *  Turn on the LEDs.
4866  **/
4867 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4868 {
4869 	u16 data = (u16)hw->mac.ledctl_mode2;
4870 	u32 i, led;
4871 
4872 	DEBUGFUNC("e1000_led_on_pchlan");
4873 
4874 	/* If no link, then turn LED on by setting the invert bit
4875 	 * for each LED whose mode is "link_up" in ledctl_mode2.
4876 	 */
4877 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4878 		for (i = 0; i < 3; i++) {
4879 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4880 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4881 			    E1000_LEDCTL_MODE_LINK_UP)
4882 				continue;
4883 			if (led & E1000_PHY_LED0_IVRT)
4884 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4885 			else
4886 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4887 		}
4888 	}
4889 
4890 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4891 }
4892 
4893 /**
4894  *  e1000_led_off_pchlan - Turn LEDs off
4895  *  @hw: pointer to the HW structure
4896  *
4897  *  Turn off the LEDs.
4898  **/
4899 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4900 {
4901 	u16 data = (u16)hw->mac.ledctl_mode1;
4902 	u32 i, led;
4903 
4904 	DEBUGFUNC("e1000_led_off_pchlan");
4905 
4906 	/* If no link, then turn LED off by clearing the invert bit
4907 	 * for each LED whose mode is "link_up" in ledctl_mode1.
4908 	 */
4909 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4910 		for (i = 0; i < 3; i++) {
4911 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4912 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4913 			    E1000_LEDCTL_MODE_LINK_UP)
4914 				continue;
4915 			if (led & E1000_PHY_LED0_IVRT)
4916 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4917 			else
4918 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4919 		}
4920 	}
4921 
4922 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4923 }
4924 
4925 /**
4926  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4927  *  @hw: pointer to the HW structure
4928  *
4929  *  Read appropriate register for the config done bit for completion status
4930  *  and configure the PHY through s/w for EEPROM-less parts.
4931  *
4932  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
4933  *  config done bit, so only an error is logged and init continues.  If we
4934  *  were to return with an error, EEPROM-less silicon would not be able to
4935  *  be reset or change link.
4936  **/
4937 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4938 {
4939 	s32 ret_val = E1000_SUCCESS;
4940 	u32 bank = 0;
4941 	u32 status;
4942 
4943 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4944 
4945 	e1000_get_cfg_done_generic(hw);
4946 
4947 	/* Wait for indication from h/w that it has completed basic config */
4948 	if (hw->mac.type >= e1000_ich10lan) {
4949 		e1000_lan_init_done_ich8lan(hw);
4950 	} else {
4951 		ret_val = e1000_get_auto_rd_done_generic(hw);
4952 		if (ret_val) {
4953 			/* When auto config read does not complete, do not
4954 			 * return with an error. This can happen in situations
4955 			 * where there is no eeprom and prevents getting link.
4956 			 */
4957 			DEBUGOUT("Auto Read Done did not complete\n");
4958 			ret_val = E1000_SUCCESS;
4959 		}
4960 	}
4961 
4962 	/* Clear PHY Reset Asserted bit */
4963 	status = E1000_READ_REG(hw, E1000_STATUS);
4964 	if (status & E1000_STATUS_PHYRA)
4965 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4966 	else
4967 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4968 
4969 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4970 	if (hw->mac.type <= e1000_ich9lan) {
4971 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
4972 		    (hw->phy.type == e1000_phy_igp_3)) {
4973 			e1000_phy_init_script_igp3(hw);
4974 		}
4975 	} else {
4976 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4977 			/* Maybe we should do a basic PHY config */
4978 			DEBUGOUT("EEPROM not present\n");
4979 			ret_val = -E1000_ERR_CONFIG;
4980 		}
4981 	}
4982 
4983 	return ret_val;
4984 }
4985 
4986 /**
4987  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4988  * @hw: pointer to the HW structure
4989  *
4990  * When powering down the PHY to save power, turning off link during a driver
4991  * unload, or when wake on LAN is not enabled, remove the link.
4992  **/
4993 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4994 {
4995 	/* If the management interface is not enabled, then power down */
4996 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4997 	      hw->phy.ops.check_reset_block(hw)))
4998 		e1000_power_down_phy_copper(hw);
4999 
5000 	return;
5001 }
5002 
5003 /**
5004  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5005  *  @hw: pointer to the HW structure
5006  *
5007  *  Clears hardware counters specific to the silicon family and calls
5008  *  clear_hw_cntrs_generic to clear all general purpose counters.
5009  **/
5010 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5011 {
5012 	u16 phy_data;
5013 	s32 ret_val;
5014 
5015 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5016 
5017 	e1000_clear_hw_cntrs_base_generic(hw);
5018 
5019 	E1000_READ_REG(hw, E1000_ALGNERRC);
5020 	E1000_READ_REG(hw, E1000_RXERRC);
5021 	E1000_READ_REG(hw, E1000_TNCRS);
5022 	E1000_READ_REG(hw, E1000_CEXTERR);
5023 	E1000_READ_REG(hw, E1000_TSCTC);
5024 	E1000_READ_REG(hw, E1000_TSCTFC);
5025 
5026 	E1000_READ_REG(hw, E1000_MGTPRC);
5027 	E1000_READ_REG(hw, E1000_MGTPDC);
5028 	E1000_READ_REG(hw, E1000_MGTPTC);
5029 
5030 	E1000_READ_REG(hw, E1000_IAC);
5031 	E1000_READ_REG(hw, E1000_ICRXOC);
5032 
5033 	/* Clear PHY statistics registers */
5034 	if ((hw->phy.type == e1000_phy_82578) ||
5035 	    (hw->phy.type == e1000_phy_82579) ||
5036 	    (hw->phy.type == e1000_phy_i217) ||
5037 	    (hw->phy.type == e1000_phy_82577)) {
5038 		ret_val = hw->phy.ops.acquire(hw);
5039 		if (ret_val)
5040 			return;
5041 		ret_val = hw->phy.ops.set_page(hw,
5042 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
5043 		if (ret_val)
5044 			goto release;
5045 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5046 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5047 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5048 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5049 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5050 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5051 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5052 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5053 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5054 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5055 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5056 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5057 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5058 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5059 release:
5060 		hw->phy.ops.release(hw);
5061 	}
5062 }
5063 
5064