xref: /linux/drivers/net/ethernet/intel/e1000e/ich8lan.c (revision a508da6cc0093171833efb8376b00473f24221b9)
1 /*******************************************************************************
2 
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2012 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 /*
30  * 82562G 10/100 Network Connection
31  * 82562G-2 10/100 Network Connection
32  * 82562GT 10/100 Network Connection
33  * 82562GT-2 10/100 Network Connection
34  * 82562V 10/100 Network Connection
35  * 82562V-2 10/100 Network Connection
36  * 82566DC-2 Gigabit Network Connection
37  * 82566DC Gigabit Network Connection
38  * 82566DM-2 Gigabit Network Connection
39  * 82566DM Gigabit Network Connection
40  * 82566MC Gigabit Network Connection
41  * 82566MM Gigabit Network Connection
42  * 82567LM Gigabit Network Connection
43  * 82567LF Gigabit Network Connection
44  * 82567V Gigabit Network Connection
45  * 82567LM-2 Gigabit Network Connection
46  * 82567LF-2 Gigabit Network Connection
47  * 82567V-2 Gigabit Network Connection
48  * 82567LF-3 Gigabit Network Connection
49  * 82567LM-3 Gigabit Network Connection
50  * 82567LM-4 Gigabit Network Connection
51  * 82577LM Gigabit Network Connection
52  * 82577LC Gigabit Network Connection
53  * 82578DM Gigabit Network Connection
54  * 82578DC Gigabit Network Connection
55  * 82579LM Gigabit Network Connection
56  * 82579V Gigabit Network Connection
57  */
58 
59 #include "e1000.h"
60 
61 #define ICH_FLASH_GFPREG		0x0000
62 #define ICH_FLASH_HSFSTS		0x0004
63 #define ICH_FLASH_HSFCTL		0x0006
64 #define ICH_FLASH_FADDR			0x0008
65 #define ICH_FLASH_FDATA0		0x0010
66 #define ICH_FLASH_PR0			0x0074
67 
68 #define ICH_FLASH_READ_COMMAND_TIMEOUT	500
69 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT	500
70 #define ICH_FLASH_ERASE_COMMAND_TIMEOUT	3000000
71 #define ICH_FLASH_LINEAR_ADDR_MASK	0x00FFFFFF
72 #define ICH_FLASH_CYCLE_REPEAT_COUNT	10
73 
74 #define ICH_CYCLE_READ			0
75 #define ICH_CYCLE_WRITE			2
76 #define ICH_CYCLE_ERASE			3
77 
78 #define FLASH_GFPREG_BASE_MASK		0x1FFF
79 #define FLASH_SECTOR_ADDR_SHIFT		12
80 
81 #define ICH_FLASH_SEG_SIZE_256		256
82 #define ICH_FLASH_SEG_SIZE_4K		4096
83 #define ICH_FLASH_SEG_SIZE_8K		8192
84 #define ICH_FLASH_SEG_SIZE_64K		65536
85 
86 
87 #define E1000_ICH_FWSM_RSPCIPHY	0x00000040 /* Reset PHY on PCI Reset */
88 /* FW established a valid mode */
89 #define E1000_ICH_FWSM_FW_VALID		0x00008000
90 
91 #define E1000_ICH_MNG_IAMT_MODE		0x2
92 
93 #define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
94 				 (ID_LED_DEF1_OFF2 <<  8) | \
95 				 (ID_LED_DEF1_ON2  <<  4) | \
96 				 (ID_LED_DEF1_DEF2))
97 
98 #define E1000_ICH_NVM_SIG_WORD		0x13
99 #define E1000_ICH_NVM_SIG_MASK		0xC000
100 #define E1000_ICH_NVM_VALID_SIG_MASK    0xC0
101 #define E1000_ICH_NVM_SIG_VALUE         0x80
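/*
 * Assumed usage note: the bank signature sits in the upper byte of NVM
 * word 0x13, so a flash bank is normally treated as valid when that byte
 * satisfies (byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE,
 * i.e. bits 15:14 of the word (E1000_ICH_NVM_SIG_MASK) read 10b.
 */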
102 
103 #define E1000_ICH8_LAN_INIT_TIMEOUT	1500
104 
105 #define E1000_FEXTNVM_SW_CONFIG		1
106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 
108 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK    0x0C000000
109 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC  0x08000000
110 
111 #define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
112 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
113 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
114 
115 #define PCIE_ICH8_SNOOP_ALL		PCIE_NO_SNOOP_ALL
116 
117 #define E1000_ICH_RAR_ENTRIES		7
118 #define E1000_PCH2_RAR_ENTRIES		5 /* RAR[0], SHRA[0-3] */
119 #define E1000_PCH_LPT_RAR_ENTRIES	12 /* RAR[0], SHRA[0-10] */
120 
121 #define PHY_PAGE_SHIFT 5
122 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
123 			   ((reg) & MAX_PHY_REG_ADDRESS))
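/*
 * Worked example (illustrative): PHY_REG(770, 17) expands to
 * (770 << 5) | (17 & MAX_PHY_REG_ADDRESS) == 0x6051, i.e. page 770,
 * register 17, the address used for HV_PM_CTRL further below.
 */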
124 #define IGP3_KMRN_DIAG  PHY_REG(770, 19) /* KMRN Diagnostic */
125 #define IGP3_VR_CTRL    PHY_REG(776, 18) /* Voltage Regulator Control */
126 
127 #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS	0x0002
128 #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
129 #define IGP3_VR_CTRL_MODE_SHUTDOWN	0x0200
130 
131 #define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */
132 
133 #define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
134 
135 /* SMBus Control Phy Register */
136 #define CV_SMB_CTRL		PHY_REG(769, 23)
137 #define CV_SMB_CTRL_FORCE_SMBUS	0x0001
138 
139 /* SMBus Address Phy Register */
140 #define HV_SMB_ADDR            PHY_REG(768, 26)
141 #define HV_SMB_ADDR_MASK       0x007F
142 #define HV_SMB_ADDR_PEC_EN     0x0200
143 #define HV_SMB_ADDR_VALID      0x0080
144 #define HV_SMB_ADDR_FREQ_MASK           0x1100
145 #define HV_SMB_ADDR_FREQ_LOW_SHIFT      8
146 #define HV_SMB_ADDR_FREQ_HIGH_SHIFT     12
147 
148 /* PHY Power Management Control */
149 #define HV_PM_CTRL		PHY_REG(770, 17)
150 #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA	0x100
151 
152 /* PHY Low Power Idle Control */
153 #define I82579_LPI_CTRL				PHY_REG(772, 20)
154 #define I82579_LPI_CTRL_ENABLE_MASK		0x6000
155 #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT	0x80
156 
157 /* EMI Registers */
158 #define I82579_EMI_ADDR         0x10
159 #define I82579_EMI_DATA         0x11
160 #define I82579_LPI_UPDATE_TIMER 0x4805	/* in 40ns units + 40 ns base value */
161 #define I82579_MSE_THRESHOLD    0x084F	/* Mean Square Error Threshold */
162 #define I82579_MSE_LINK_DOWN    0x2411	/* MSE count before dropping link */
163 #define I217_EEE_ADVERTISEMENT  0x8001	/* IEEE MMD Register 7.60 */
164 #define I217_EEE_LP_ABILITY     0x8002	/* IEEE MMD Register 7.61 */
165 #define I217_EEE_100_SUPPORTED  (1 << 1)	/* 100BaseTx EEE supported */
166 
167 /* Intel Rapid Start Technology Support */
168 #define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
169 #define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
170 #define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
171 #define I217_SxCTRL_MASK                0x1000
172 #define I217_CGFREG                     PHY_REG(772, 29)
173 #define I217_CGFREG_MASK                0x0002
174 #define I217_MEMPWR                     PHY_REG(772, 26)
175 #define I217_MEMPWR_MASK                0x0010
176 
177 /* Strapping Option Register - RO */
178 #define E1000_STRAP                     0x0000C
179 #define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
180 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
181 #define E1000_STRAP_SMT_FREQ_MASK       0x00003000
182 #define E1000_STRAP_SMT_FREQ_SHIFT      12
183 
184 /* OEM Bits Phy Register */
185 #define HV_OEM_BITS            PHY_REG(768, 25)
186 #define HV_OEM_BITS_LPLU       0x0004 /* Low Power Link Up */
187 #define HV_OEM_BITS_GBE_DIS    0x0040 /* Gigabit Disable */
188 #define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
189 
190 #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
191 #define E1000_NVM_K1_ENABLE 0x1  /* NVM Enable K1 bit */
192 
193 /* KMRN Mode Control */
194 #define HV_KMRN_MODE_CTRL      PHY_REG(769, 16)
195 #define HV_KMRN_MDIO_SLOW      0x0400
196 
197 /* KMRN FIFO Control and Status */
198 #define HV_KMRN_FIFO_CTRLSTA                  PHY_REG(770, 16)
199 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK    0x7000
200 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT   12
201 
202 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
203 /* Offset 04h HSFSTS */
204 union ich8_hws_flash_status {
205 	struct ich8_hsfsts {
206 		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
207 		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
208 		u16 dael       :1; /* bit 2 Direct Access error Log */
209 		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
210 		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
211 		u16 reserved1  :2; /* bit 7:6 Reserved */
212 		u16 reserved2  :6; /* bit 13:8 Reserved */
213 		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
214 		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
215 	} hsf_status;
216 	u16 regval;
217 };
218 
219 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
220 /* Offset 06h FLCTL */
221 union ich8_hws_flash_ctrl {
222 	struct ich8_hsflctl {
223 		u16 flcgo      :1;   /* 0 Flash Cycle Go */
224 		u16 flcycle    :2;   /* 2:1 Flash Cycle */
225 		u16 reserved   :5;   /* 7:3 Reserved  */
226 		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
227 		u16 flockdn    :6;   /* 15:10 Reserved */
228 	} hsf_ctrl;
229 	u16 regval;
230 };
231 
232 /* ICH Flash Region Access Permissions */
233 union ich8_hws_flash_regacc {
234 	struct ich8_flracc {
235 		u32 grra      :8; /* 0:7 GbE region Read Access */
236 		u32 grwa      :8; /* 8:15 GbE region Write Access */
237 		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
238 		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
239 	} hsf_flregacc;
240 	u32 regval;
241 };
242 
243 /* ICH Flash Protected Region */
244 union ich8_flash_protected_range {
245 	struct ich8_pr {
246 		u32 base:13;     /* 0:12 Protected Range Base */
247 		u32 reserved1:2; /* 13:14 Reserved */
248 		u32 rpe:1;       /* 15 Read Protection Enable */
249 		u32 limit:13;    /* 16:28 Protected Range Limit */
250 		u32 reserved2:2; /* 29:30 Reserved */
251 		u32 wpe:1;       /* 31 Write Protection Enable */
252 	} range;
253 	u32 regval;
254 };
255 
256 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
257 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
258 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
259 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
260 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
261 						u32 offset, u8 byte);
262 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
263 					 u8 *data);
264 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
265 					 u16 *data);
266 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
267 					 u8 size, u16 *data);
268 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
269 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
270 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
271 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
272 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
273 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
274 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
275 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
276 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
277 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
278 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
279 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
280 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
281 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
282 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
283 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
284 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
285 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
286 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
287 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
288 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
289 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
290 
291 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
292 {
293 	return readw(hw->flash_address + reg);
294 }
295 
296 static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
297 {
298 	return readl(hw->flash_address + reg);
299 }
300 
301 static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
302 {
303 	writew(val, hw->flash_address + reg);
304 }
305 
306 static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
307 {
308 	writel(val, hw->flash_address + reg);
309 }
310 
311 #define er16flash(reg)		__er16flash(hw, (reg))
312 #define er32flash(reg)		__er32flash(hw, (reg))
313 #define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
314 #define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))
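
/*
 * Illustrative sketch, not part of the original driver: a minimal
 * hardware-sequencing read of one 16-bit word using the register layout
 * and accessors defined above.  The driver's real read path (declared
 * above as e1000_read_flash_data_ich8lan) additionally polls for cycle
 * completion with a timeout and retries on error; the helper name below
 * is hypothetical.
 */
static inline s32 e1000_example_read_flash_word(struct e1000_hw *hw,
						u32 linear_addr, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;

	/* Bail out if a flash cycle is already in progress */
	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
	if (hsfsts.hsf_status.flcinprog)
		return -E1000_ERR_NVM;

	/* Program the linear address of the word to read */
	ew32flash(ICH_FLASH_FADDR, linear_addr & ICH_FLASH_LINEAR_ADDR_MASK);

	/* Start a 2-byte read cycle (fldbcount is encoded as size - 1) */
	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.fldbcount = 1;
	hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
	hsflctl.hsf_ctrl.flcgo = 1;
	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* The real code waits for flcdone/!flcerr before consuming FDATA0 */
	*data = (u16)er32flash(ICH_FLASH_FDATA0);
	return 0;
}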
315 
316 /**
317  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
318  *  @hw: pointer to the HW structure
319  *
320  *  Test access to the PHY registers by reading the PHY ID registers.  If
321  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
322  *  the PHY ID is already known (e.g. on the resume path), compare it with
323  *  the known ID; otherwise assume the PHY ID read is correct if it is valid.
324  *  Assumes the sw/fw/hw semaphore is already acquired.
325  **/
326 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
327 {
328 	u16 phy_reg;
329 	u32 phy_id;
330 
331 	e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
332 	phy_id = (u32)(phy_reg << 16);
333 	e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
334 	phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
335 
336 	if (hw->phy.id) {
337 		if (hw->phy.id == phy_id)
338 			return true;
339 	} else {
340 		if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
341 			hw->phy.id = phy_id;
342 		return true;
343 	}
344 
345 	return false;
346 }
347 
348 /**
349  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
350  *  @hw: pointer to the HW structure
351  *
352  *  Workarounds/flow necessary for PHY initialization during driver load
353  *  and resume paths.
354  **/
355 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
356 {
357 	u32 mac_reg, fwsm = er32(FWSM);
358 	s32 ret_val;
359 	u16 phy_reg;
360 
361 	ret_val = hw->phy.ops.acquire(hw);
362 	if (ret_val) {
363 		e_dbg("Failed to initialize PHY flow\n");
364 		return ret_val;
365 	}
366 
367 	/*
368 	 * The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
369 	 * inaccessible and resetting the PHY is not blocked, toggle the
370 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
371 	 */
372 	switch (hw->mac.type) {
373 	case e1000_pch_lpt:
374 		if (e1000_phy_is_accessible_pchlan(hw))
375 			break;
376 
377 		/*
378 		 * Before toggling LANPHYPC, see if PHY is accessible by
379 		 * forcing MAC to SMBus mode first.
380 		 */
381 		mac_reg = er32(CTRL_EXT);
382 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
383 		ew32(CTRL_EXT, mac_reg);
384 
385 		/* fall-through */
386 	case e1000_pch2lan:
387 		/*
388 		 * Gate automatic PHY configuration by hardware on
389 		 * non-managed 82579
390 		 */
391 		if ((hw->mac.type == e1000_pch2lan) &&
392 		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
393 			e1000_gate_hw_phy_config_ich8lan(hw, true);
394 
395 		if (e1000_phy_is_accessible_pchlan(hw)) {
396 			if (hw->mac.type == e1000_pch_lpt) {
397 				/* Unforce SMBus mode in PHY */
398 				e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
399 				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
400 				e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
401 
402 				/* Unforce SMBus mode in MAC */
403 				mac_reg = er32(CTRL_EXT);
404 				mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
405 				ew32(CTRL_EXT, mac_reg);
406 			}
407 			break;
408 		}
409 
410 		/* fall-through */
411 	case e1000_pchlan:
412 		if ((hw->mac.type == e1000_pchlan) &&
413 		    (fwsm & E1000_ICH_FWSM_FW_VALID))
414 			break;
415 
416 		if (hw->phy.ops.check_reset_block(hw)) {
417 			e_dbg("Required LANPHYPC toggle blocked by ME\n");
418 			break;
419 		}
420 
421 		e_dbg("Toggling LANPHYPC\n");
422 
423 		/* Set Phy Config Counter to 50msec */
424 		mac_reg = er32(FEXTNVM3);
425 		mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
426 		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
427 		ew32(FEXTNVM3, mac_reg);
428 
429 		/* Toggle LANPHYPC Value bit */
430 		mac_reg = er32(CTRL);
431 		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
432 		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
433 		ew32(CTRL, mac_reg);
434 		e1e_flush();
435 		udelay(10);
436 		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
437 		ew32(CTRL, mac_reg);
438 		e1e_flush();
439 		if (hw->mac.type < e1000_pch_lpt) {
440 			msleep(50);
441 		} else {
442 			u16 count = 20;
443 			do {
444 				usleep_range(5000, 10000);
445 			} while (!(er32(CTRL_EXT) &
446 				   E1000_CTRL_EXT_LPCD) && count--);
447 		}
448 		break;
449 	default:
450 		break;
451 	}
452 
453 	hw->phy.ops.release(hw);
454 
455 	/*
456 	 * Reset the PHY before any access to it.  Doing so ensures
457 	 * that the PHY is in a known good state before we read/write
458 	 * PHY registers.  The generic reset is sufficient here,
459 	 * because we haven't determined the PHY type yet.
460 	 */
461 	ret_val = e1000e_phy_hw_reset_generic(hw);
462 
463 	/* Ungate automatic PHY configuration on non-managed 82579 */
464 	if ((hw->mac.type == e1000_pch2lan) &&
465 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
466 		usleep_range(10000, 20000);
467 		e1000_gate_hw_phy_config_ich8lan(hw, false);
468 	}
469 
470 	return ret_val;
471 }
472 
473 /**
474  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
475  *  @hw: pointer to the HW structure
476  *
477  *  Initialize family-specific PHY parameters and function pointers.
478  **/
479 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
480 {
481 	struct e1000_phy_info *phy = &hw->phy;
482 	s32 ret_val = 0;
483 
484 	phy->addr                     = 1;
485 	phy->reset_delay_us           = 100;
486 
487 	phy->ops.set_page             = e1000_set_page_igp;
488 	phy->ops.read_reg             = e1000_read_phy_reg_hv;
489 	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
490 	phy->ops.read_reg_page        = e1000_read_phy_reg_page_hv;
491 	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
492 	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
493 	phy->ops.write_reg            = e1000_write_phy_reg_hv;
494 	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
495 	phy->ops.write_reg_page       = e1000_write_phy_reg_page_hv;
496 	phy->ops.power_up             = e1000_power_up_phy_copper;
497 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
498 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
499 
500 	phy->id = e1000_phy_unknown;
501 
502 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
503 	if (ret_val)
504 		return ret_val;
505 
506 	if (phy->id == e1000_phy_unknown)
507 		switch (hw->mac.type) {
508 		default:
509 			ret_val = e1000e_get_phy_id(hw);
510 			if (ret_val)
511 				return ret_val;
512 			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
513 				break;
514 			/* fall-through */
515 		case e1000_pch2lan:
516 		case e1000_pch_lpt:
517 			/*
518 			 * In case the PHY needs to be in mdio slow mode,
519 			 * set slow mode and try to get the PHY id again.
520 			 */
521 			ret_val = e1000_set_mdio_slow_mode_hv(hw);
522 			if (ret_val)
523 				return ret_val;
524 			ret_val = e1000e_get_phy_id(hw);
525 			if (ret_val)
526 				return ret_val;
527 			break;
528 		}
529 	phy->type = e1000e_get_phy_type_from_id(phy->id);
530 
531 	switch (phy->type) {
532 	case e1000_phy_82577:
533 	case e1000_phy_82579:
534 	case e1000_phy_i217:
535 		phy->ops.check_polarity = e1000_check_polarity_82577;
536 		phy->ops.force_speed_duplex =
537 		    e1000_phy_force_speed_duplex_82577;
538 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
539 		phy->ops.get_info = e1000_get_phy_info_82577;
540 		phy->ops.commit = e1000e_phy_sw_reset;
541 		break;
542 	case e1000_phy_82578:
543 		phy->ops.check_polarity = e1000_check_polarity_m88;
544 		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
545 		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
546 		phy->ops.get_info = e1000e_get_phy_info_m88;
547 		break;
548 	default:
549 		ret_val = -E1000_ERR_PHY;
550 		break;
551 	}
552 
553 	return ret_val;
554 }
555 
556 /**
557  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
558  *  @hw: pointer to the HW structure
559  *
560  *  Initialize family-specific PHY parameters and function pointers.
561  **/
562 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
563 {
564 	struct e1000_phy_info *phy = &hw->phy;
565 	s32 ret_val;
566 	u16 i = 0;
567 
568 	phy->addr			= 1;
569 	phy->reset_delay_us		= 100;
570 
571 	phy->ops.power_up               = e1000_power_up_phy_copper;
572 	phy->ops.power_down             = e1000_power_down_phy_copper_ich8lan;
573 
574 	/*
575 	 * We may need to do this twice - once for IGP and if that fails,
576 	 * we'll set BM func pointers and try again
577 	 */
578 	ret_val = e1000e_determine_phy_address(hw);
579 	if (ret_val) {
580 		phy->ops.write_reg = e1000e_write_phy_reg_bm;
581 		phy->ops.read_reg  = e1000e_read_phy_reg_bm;
582 		ret_val = e1000e_determine_phy_address(hw);
583 		if (ret_val) {
584 			e_dbg("Cannot determine PHY addr. Erroring out\n");
585 			return ret_val;
586 		}
587 	}
588 
589 	phy->id = 0;
590 	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
591 	       (i++ < 100)) {
592 		usleep_range(1000, 2000);
593 		ret_val = e1000e_get_phy_id(hw);
594 		if (ret_val)
595 			return ret_val;
596 	}
597 
598 	/* Verify phy id */
599 	switch (phy->id) {
600 	case IGP03E1000_E_PHY_ID:
601 		phy->type = e1000_phy_igp_3;
602 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
603 		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
604 		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
605 		phy->ops.get_info = e1000e_get_phy_info_igp;
606 		phy->ops.check_polarity = e1000_check_polarity_igp;
607 		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
608 		break;
609 	case IFE_E_PHY_ID:
610 	case IFE_PLUS_E_PHY_ID:
611 	case IFE_C_E_PHY_ID:
612 		phy->type = e1000_phy_ife;
613 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
614 		phy->ops.get_info = e1000_get_phy_info_ife;
615 		phy->ops.check_polarity = e1000_check_polarity_ife;
616 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
617 		break;
618 	case BME1000_E_PHY_ID:
619 		phy->type = e1000_phy_bm;
620 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
621 		phy->ops.read_reg = e1000e_read_phy_reg_bm;
622 		phy->ops.write_reg = e1000e_write_phy_reg_bm;
623 		phy->ops.commit = e1000e_phy_sw_reset;
624 		phy->ops.get_info = e1000e_get_phy_info_m88;
625 		phy->ops.check_polarity = e1000_check_polarity_m88;
626 		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
627 		break;
628 	default:
629 		return -E1000_ERR_PHY;
631 	}
632 
633 	return 0;
634 }
635 
636 /**
637  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
638  *  @hw: pointer to the HW structure
639  *
640  *  Initialize family-specific NVM parameters and function
641  *  pointers.
642  **/
643 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
644 {
645 	struct e1000_nvm_info *nvm = &hw->nvm;
646 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
647 	u32 gfpreg, sector_base_addr, sector_end_addr;
648 	u16 i;
649 
650 	/* Can't read flash registers if the register set isn't mapped. */
651 	if (!hw->flash_address) {
652 		e_dbg("ERROR: Flash registers not mapped\n");
653 		return -E1000_ERR_CONFIG;
654 	}
655 
656 	nvm->type = e1000_nvm_flash_sw;
657 
658 	gfpreg = er32flash(ICH_FLASH_GFPREG);
659 
660 	/*
661 	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
662 	 * Add 1 to sector_end_addr since this sector is included in
663 	 * the overall size.
664 	 */
665 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
666 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
667 
668 	/* flash_base_addr is byte-aligned */
669 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
670 
671 	/*
672 	 * find total size of the NVM, then cut in half since the total
673 	 * size represents two separate NVM banks.
674 	 */
675 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
676 				<< FLASH_SECTOR_ADDR_SHIFT;
677 	nvm->flash_bank_size /= 2;
678 	/* Adjust to word count */
679 	nvm->flash_bank_size /= sizeof(u16);
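	/*
	 * Illustrative arithmetic (hypothetical sector count): an 8-sector
	 * GbE region gives (8 << 12) = 32768 bytes total, 16384 bytes per
	 * bank, and 8192 16-bit words per bank.
	 */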
680 
681 	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
682 
683 	/* Clear shadow ram */
684 	for (i = 0; i < nvm->word_size; i++) {
685 		dev_spec->shadow_ram[i].modified = false;
686 		dev_spec->shadow_ram[i].value    = 0xFFFF;
687 	}
688 
689 	return 0;
690 }
691 
692 /**
693  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
694  *  @hw: pointer to the HW structure
695  *
696  *  Initialize family-specific MAC parameters and function
697  *  pointers.
698  **/
699 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
700 {
701 	struct e1000_mac_info *mac = &hw->mac;
702 
703 	/* Set media type function pointer */
704 	hw->phy.media_type = e1000_media_type_copper;
705 
706 	/* Set mta register count */
707 	mac->mta_reg_count = 32;
708 	/* Set rar entry count */
709 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
710 	if (mac->type == e1000_ich8lan)
711 		mac->rar_entry_count--;
712 	/* FWSM register */
713 	mac->has_fwsm = true;
714 	/* ARC subsystem not supported */
715 	mac->arc_subsystem_valid = false;
716 	/* Adaptive IFS supported */
717 	mac->adaptive_ifs = true;
718 
719 	/* LED and other operations */
720 	switch (mac->type) {
721 	case e1000_ich8lan:
722 	case e1000_ich9lan:
723 	case e1000_ich10lan:
724 		/* check management mode */
725 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
726 		/* ID LED init */
727 		mac->ops.id_led_init = e1000e_id_led_init_generic;
728 		/* blink LED */
729 		mac->ops.blink_led = e1000e_blink_led_generic;
730 		/* setup LED */
731 		mac->ops.setup_led = e1000e_setup_led_generic;
732 		/* cleanup LED */
733 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
734 		/* turn on/off LED */
735 		mac->ops.led_on = e1000_led_on_ich8lan;
736 		mac->ops.led_off = e1000_led_off_ich8lan;
737 		break;
738 	case e1000_pch2lan:
739 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
740 		mac->ops.rar_set = e1000_rar_set_pch2lan;
741 		/* fall-through */
742 	case e1000_pch_lpt:
743 	case e1000_pchlan:
744 		/* check management mode */
745 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
746 		/* ID LED init */
747 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
748 		/* setup LED */
749 		mac->ops.setup_led = e1000_setup_led_pchlan;
750 		/* cleanup LED */
751 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
752 		/* turn on/off LED */
753 		mac->ops.led_on = e1000_led_on_pchlan;
754 		mac->ops.led_off = e1000_led_off_pchlan;
755 		break;
756 	default:
757 		break;
758 	}
759 
760 	if (mac->type == e1000_pch_lpt) {
761 		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
762 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
763 	}
764 
765 	/* Enable PCS Lock-loss workaround for ICH8 */
766 	if (mac->type == e1000_ich8lan)
767 		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
768 
769 	/*
770 	 * Gate automatic PHY configuration by hardware on managed
771 	 * 82579 and i217
772 	 */
773 	if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
774 	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
775 		e1000_gate_hw_phy_config_ich8lan(hw, true);
776 
777 	return 0;
778 }
779 
780 /**
781  *  e1000_set_eee_pchlan - Enable/disable EEE support
782  *  @hw: pointer to the HW structure
783  *
784  *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
785  *  the LPI Control register will remain set only if/when link is up.
786  **/
787 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
788 {
789 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
790 	s32 ret_val = 0;
791 	u16 phy_reg;
792 
793 	if ((hw->phy.type != e1000_phy_82579) &&
794 	    (hw->phy.type != e1000_phy_i217))
795 		return 0;
796 
797 	ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
798 	if (ret_val)
799 		return ret_val;
800 
801 	if (dev_spec->eee_disable)
802 		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
803 	else
804 		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
805 
806 	ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
807 	if (ret_val)
808 		return ret_val;
809 
810 	if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
811 		/* Save off link partner's EEE ability */
812 		ret_val = hw->phy.ops.acquire(hw);
813 		if (ret_val)
814 			return ret_val;
815 		ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
816 					  I217_EEE_LP_ABILITY);
817 		if (ret_val)
818 			goto release;
819 		e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
820 
821 		/*
822 		 * EEE is not supported in 100Half, so ignore partner's EEE
823 		 * in 100 ability if full-duplex is not advertised.
824 		 */
825 		e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
826 		if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
827 			dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
828 release:
829 		hw->phy.ops.release(hw);
830 	}
831 
832 	return 0;
833 }
834 
835 /**
836  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
837  *  @hw: pointer to the HW structure
838  *
839  *  Checks to see if the link status of the hardware has changed.  If a
840  *  change in link status has been detected, then we read the PHY registers
841  *  to get the current speed/duplex if link exists.
842  **/
843 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
844 {
845 	struct e1000_mac_info *mac = &hw->mac;
846 	s32 ret_val;
847 	bool link;
848 	u16 phy_reg;
849 
850 	/*
851 	 * We only want to go out to the PHY registers to see if Auto-Neg
852 	 * has completed and/or if our link status has changed.  The
853 	 * get_link_status flag is set upon receiving a Link Status
854 	 * Change or Rx Sequence Error interrupt.
855 	 */
856 	if (!mac->get_link_status)
857 		return 0;
858 
859 	/*
860 	 * First we want to see if the MII Status Register reports
861 	 * link.  If so, then we want to get the current speed/duplex
862 	 * of the PHY.
863 	 */
864 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
865 	if (ret_val)
866 		return ret_val;
867 
868 	if (hw->mac.type == e1000_pchlan) {
869 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
870 		if (ret_val)
871 			return ret_val;
872 	}
873 
874 	/* Clear link partner's EEE ability */
875 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
876 
877 	if (!link)
878 		return 0; /* No link detected */
879 
880 	mac->get_link_status = false;
881 
882 	switch (hw->mac.type) {
883 	case e1000_pch2lan:
884 		ret_val = e1000_k1_workaround_lv(hw);
885 		if (ret_val)
886 			return ret_val;
887 		/* fall-through */
888 	case e1000_pchlan:
889 		if (hw->phy.type == e1000_phy_82578) {
890 			ret_val = e1000_link_stall_workaround_hv(hw);
891 			if (ret_val)
892 				return ret_val;
893 		}
894 
895 		/*
896 		 * Workaround for PCHx parts in half-duplex:
897 		 * Set the number of preambles removed from the packet
898 		 * when it is passed from the PHY to the MAC to prevent
899 		 * the MAC from misinterpreting the packet type.
900 		 */
901 		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
902 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
903 
904 		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
905 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
906 
907 		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
908 		break;
909 	default:
910 		break;
911 	}
912 
913 	/*
914 	 * Check if there was a DownShift; this must be checked
915 	 * immediately after link-up
916 	 */
917 	e1000e_check_downshift(hw);
918 
919 	/* Enable/Disable EEE after link up */
920 	ret_val = e1000_set_eee_pchlan(hw);
921 	if (ret_val)
922 		return ret_val;
923 
924 	/*
925 	 * If we are forcing speed/duplex, then we simply return since
926 	 * we have already determined whether we have link or not.
927 	 */
928 	if (!mac->autoneg)
929 		return -E1000_ERR_CONFIG;
930 
931 	/*
932 	 * Auto-Neg is enabled.  Auto Speed Detection takes care
933 	 * of MAC speed/duplex configuration.  So we only need to
934 	 * configure Collision Distance in the MAC.
935 	 */
936 	mac->ops.config_collision_dist(hw);
937 
938 	/*
939 	 * Configure Flow Control now that Auto-Neg has completed.
940 	 * First, we need to restore the desired flow control
941 	 * settings because we may have had to re-autoneg with a
942 	 * different link partner.
943 	 */
944 	ret_val = e1000e_config_fc_after_link_up(hw);
945 	if (ret_val)
946 		e_dbg("Error configuring flow control\n");
947 
948 	return ret_val;
949 }
950 
951 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
952 {
953 	struct e1000_hw *hw = &adapter->hw;
954 	s32 rc;
955 
956 	rc = e1000_init_mac_params_ich8lan(hw);
957 	if (rc)
958 		return rc;
959 
960 	rc = e1000_init_nvm_params_ich8lan(hw);
961 	if (rc)
962 		return rc;
963 
964 	switch (hw->mac.type) {
965 	case e1000_ich8lan:
966 	case e1000_ich9lan:
967 	case e1000_ich10lan:
968 		rc = e1000_init_phy_params_ich8lan(hw);
969 		break;
970 	case e1000_pchlan:
971 	case e1000_pch2lan:
972 	case e1000_pch_lpt:
973 		rc = e1000_init_phy_params_pchlan(hw);
974 		break;
975 	default:
976 		break;
977 	}
978 	if (rc)
979 		return rc;
980 
981 	/*
982 	 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
983 	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
984 	 */
985 	if ((adapter->hw.phy.type == e1000_phy_ife) ||
986 	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
987 	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
988 		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
989 		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
990 
991 		hw->mac.ops.blink_led = NULL;
992 	}
993 
994 	if ((adapter->hw.mac.type == e1000_ich8lan) &&
995 	    (adapter->hw.phy.type != e1000_phy_ife))
996 		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
997 
998 	/* Enable workaround for 82579 w/ ME enabled */
999 	if ((adapter->hw.mac.type == e1000_pch2lan) &&
1000 	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1001 		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
1002 
1003 	/* Disable EEE by default until IEEE802.3az spec is finalized */
1004 	if (adapter->flags2 & FLAG2_HAS_EEE)
1005 		adapter->hw.dev_spec.ich8lan.eee_disable = true;
1006 
1007 	return 0;
1008 }
1009 
1010 static DEFINE_MUTEX(nvm_mutex);
1011 
1012 /**
1013  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1014  *  @hw: pointer to the HW structure
1015  *
1016  *  Acquires the mutex for performing NVM operations.
1017  **/
1018 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1019 {
1020 	mutex_lock(&nvm_mutex);
1021 
1022 	return 0;
1023 }
1024 
1025 /**
1026  *  e1000_release_nvm_ich8lan - Release NVM mutex
1027  *  @hw: pointer to the HW structure
1028  *
1029  *  Releases the mutex used while performing NVM operations.
1030  **/
1031 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1032 {
1033 	mutex_unlock(&nvm_mutex);
1034 }
1035 
1036 /**
1037  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1038  *  @hw: pointer to the HW structure
1039  *
1040  *  Acquires the software control flag for performing PHY and select
1041  *  MAC CSR accesses.
1042  **/
1043 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1044 {
1045 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1046 	s32 ret_val = 0;
1047 
1048 	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1049 			     &hw->adapter->state)) {
1050 		e_dbg("contention for Phy access\n");
1051 		return -E1000_ERR_PHY;
1052 	}
1053 
1054 	while (timeout) {
1055 		extcnf_ctrl = er32(EXTCNF_CTRL);
1056 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1057 			break;
1058 
1059 		mdelay(1);
1060 		timeout--;
1061 	}
1062 
1063 	if (!timeout) {
1064 		e_dbg("SW has already locked the resource.\n");
1065 		ret_val = -E1000_ERR_CONFIG;
1066 		goto out;
1067 	}
1068 
1069 	timeout = SW_FLAG_TIMEOUT;
1070 
1071 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1072 	ew32(EXTCNF_CTRL, extcnf_ctrl);
1073 
1074 	while (timeout) {
1075 		extcnf_ctrl = er32(EXTCNF_CTRL);
1076 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1077 			break;
1078 
1079 		mdelay(1);
1080 		timeout--;
1081 	}
1082 
1083 	if (!timeout) {
1084 		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1085 		      er32(FWSM), extcnf_ctrl);
1086 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1087 		ew32(EXTCNF_CTRL, extcnf_ctrl);
1088 		ret_val = -E1000_ERR_CONFIG;
1089 		goto out;
1090 	}
1091 
1092 out:
1093 	if (ret_val)
1094 		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1095 
1096 	return ret_val;
1097 }
1098 
1099 /**
1100  *  e1000_release_swflag_ich8lan - Release software control flag
1101  *  @hw: pointer to the HW structure
1102  *
1103  *  Releases the software control flag for performing PHY and select
1104  *  MAC CSR accesses.
1105  **/
1106 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1107 {
1108 	u32 extcnf_ctrl;
1109 
1110 	extcnf_ctrl = er32(EXTCNF_CTRL);
1111 
1112 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1113 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1114 		ew32(EXTCNF_CTRL, extcnf_ctrl);
1115 	} else {
1116 		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1117 	}
1118 
1119 	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1120 }
1121 
1122 /**
1123  *  e1000_check_mng_mode_ich8lan - Checks management mode
1124  *  @hw: pointer to the HW structure
1125  *
1126  *  This checks if the adapter has any manageability enabled.
1127  *  This is a function pointer entry point only called by read/write
1128  *  routines for the PHY and NVM parts.
1129  **/
1130 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1131 {
1132 	u32 fwsm;
1133 
1134 	fwsm = er32(FWSM);
1135 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1136 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1137 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1138 }
1139 
1140 /**
1141  *  e1000_check_mng_mode_pchlan - Checks management mode
1142  *  @hw: pointer to the HW structure
1143  *
1144  *  This checks if the adapter has iAMT enabled.
1145  *  This is a function pointer entry point only called by read/write
1146  *  routines for the PHY and NVM parts.
1147  **/
1148 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1149 {
1150 	u32 fwsm;
1151 
1152 	fwsm = er32(FWSM);
1153 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1154 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1155 }
1156 
1157 /**
1158  *  e1000_rar_set_pch2lan - Set receive address register
1159  *  @hw: pointer to the HW structure
1160  *  @addr: pointer to the receive address
1161  *  @index: receive address array register
1162  *
1163  *  Sets the receive address array register at index to the address passed
1164  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1165  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1166  *  Use SHRA[0-3] in place of those reserved for ME.
1167  **/
1168 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1169 {
1170 	u32 rar_low, rar_high;
1171 
1172 	/*
1173 	 * HW expects these in little endian so we reverse the byte order
1174 	 * from network order (big endian) to little endian
1175 	 */
1176 	rar_low = ((u32)addr[0] |
1177 		   ((u32)addr[1] << 8) |
1178 		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1179 
1180 	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1181 
1182 	/* If MAC address zero, no need to set the AV bit */
1183 	if (rar_low || rar_high)
1184 		rar_high |= E1000_RAH_AV;
1185 
1186 	if (index == 0) {
1187 		ew32(RAL(index), rar_low);
1188 		e1e_flush();
1189 		ew32(RAH(index), rar_high);
1190 		e1e_flush();
1191 		return;
1192 	}
1193 
1194 	if (index < hw->mac.rar_entry_count) {
1195 		s32 ret_val;
1196 
1197 		ret_val = e1000_acquire_swflag_ich8lan(hw);
1198 		if (ret_val)
1199 			goto out;
1200 
1201 		ew32(SHRAL(index - 1), rar_low);
1202 		e1e_flush();
1203 		ew32(SHRAH(index - 1), rar_high);
1204 		e1e_flush();
1205 
1206 		e1000_release_swflag_ich8lan(hw);
1207 
1208 		/* verify the register updates */
1209 		if ((er32(SHRAL(index - 1)) == rar_low) &&
1210 		    (er32(SHRAH(index - 1)) == rar_high))
1211 			return;
1212 
1213 		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1214 		      (index - 1), er32(FWSM));
1215 	}
1216 
1217 out:
1218 	e_dbg("Failed to write receive address at index %d\n", index);
1219 }
1220 
1221 /**
1222  *  e1000_rar_set_pch_lpt - Set receive address registers
1223  *  @hw: pointer to the HW structure
1224  *  @addr: pointer to the receive address
1225  *  @index: receive address array register
1226  *
1227  *  Sets the receive address register array at index to the address passed
1228  *  in by addr. For LPT, RAR[0] is the base address register that is to
1229  *  contain the MAC address. SHRA[0-10] are the shared receive address
1230  *  registers that are shared between the Host and manageability engine (ME).
1231  **/
1232 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1233 {
1234 	u32 rar_low, rar_high;
1235 	u32 wlock_mac;
1236 
1237 	/*
1238 	 * HW expects these in little endian so we reverse the byte order
1239 	 * from network order (big endian) to little endian
1240 	 */
1241 	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1242 		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1243 
1244 	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1245 
1246 	/* If MAC address zero, no need to set the AV bit */
1247 	if (rar_low || rar_high)
1248 		rar_high |= E1000_RAH_AV;
1249 
1250 	if (index == 0) {
1251 		ew32(RAL(index), rar_low);
1252 		e1e_flush();
1253 		ew32(RAH(index), rar_high);
1254 		e1e_flush();
1255 		return;
1256 	}
1257 
1258 	/*
1259 	 * The manageability engine (ME) can lock certain SHRAR registers that
1260 	 * it is using - those registers are unavailable for use.
1261 	 */
1262 	if (index < hw->mac.rar_entry_count) {
1263 		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1264 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1265 
1266 		/* Check if all SHRAR registers are locked */
1267 		if (wlock_mac == 1)
1268 			goto out;
1269 
1270 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1271 			s32 ret_val;
1272 
1273 			ret_val = e1000_acquire_swflag_ich8lan(hw);
1274 
1275 			if (ret_val)
1276 				goto out;
1277 
1278 			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1279 			e1e_flush();
1280 			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1281 			e1e_flush();
1282 
1283 			e1000_release_swflag_ich8lan(hw);
1284 
1285 			/* verify the register updates */
1286 			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1287 			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1288 				return;
1289 		}
1290 	}
1291 
1292 out:
1293 	e_dbg("Failed to write receive address at index %d\n", index);
1294 }
1295 
1296 /**
1297  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1298  *  @hw: pointer to the HW structure
1299  *
1300  *  Checks if firmware is blocking the reset of the PHY.
1301  *  This is a function pointer entry point only called by
1302  *  reset routines.
1303  **/
1304 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1305 {
1306 	u32 fwsm;
1307 
1308 	fwsm = er32(FWSM);
1309 
1310 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
1311 }
1312 
1313 /**
1314  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1315  *  @hw: pointer to the HW structure
1316  *
1317  *  Assumes semaphore already acquired.
1318  *
1319  **/
1320 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1321 {
1322 	u16 phy_data;
1323 	u32 strap = er32(STRAP);
1324 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1325 	    E1000_STRAP_SMT_FREQ_SHIFT;
1326 	s32 ret_val = 0;
1327 
1328 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1329 
1330 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1331 	if (ret_val)
1332 		return ret_val;
1333 
1334 	phy_data &= ~HV_SMB_ADDR_MASK;
1335 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1336 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1337 
1338 	if (hw->phy.type == e1000_phy_i217) {
1339 		/* Restore SMBus frequency */
1340 		if (freq--) {
1341 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1342 			phy_data |= (freq & (1 << 0)) <<
1343 			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
1344 			phy_data |= (freq & (1 << 1)) <<
1345 			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1346 		} else {
1347 			e_dbg("Unsupported SMB frequency in PHY\n");
1348 		}
1349 	}
1350 
1351 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1352 }
1353 
1354 /**
1355  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1356  *  @hw:   pointer to the HW structure
1357  *
1358  *  SW should configure the LCD from the NVM extended configuration region
1359  *  as a workaround for certain parts.
1360  **/
1361 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1362 {
1363 	struct e1000_phy_info *phy = &hw->phy;
1364 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1365 	s32 ret_val = 0;
1366 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1367 
1368 	/*
1369 	 * Initialize the PHY from the NVM on ICH platforms.  This
1370 	 * is needed due to an issue where the NVM configuration is
1371 	 * not properly autoloaded after power transitions.
1372 	 * Therefore, after each PHY reset, we will load the
1373 	 * configuration data out of the NVM manually.
1374 	 */
1375 	switch (hw->mac.type) {
1376 	case e1000_ich8lan:
1377 		if (phy->type != e1000_phy_igp_3)
1378 			return ret_val;
1379 
1380 		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
1381 		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
1382 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1383 			break;
1384 		}
1385 		/* Fall-through */
1386 	case e1000_pchlan:
1387 	case e1000_pch2lan:
1388 	case e1000_pch_lpt:
1389 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1390 		break;
1391 	default:
1392 		return ret_val;
1393 	}
1394 
1395 	ret_val = hw->phy.ops.acquire(hw);
1396 	if (ret_val)
1397 		return ret_val;
1398 
1399 	data = er32(FEXTNVM);
1400 	if (!(data & sw_cfg_mask))
1401 		goto release;
1402 
1403 	/*
1404 	 * Make sure HW does not configure LCD from PHY
1405 	 * extended configuration before SW configuration
1406 	 */
1407 	data = er32(EXTCNF_CTRL);
1408 	if ((hw->mac.type < e1000_pch2lan) &&
1409 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1410 		goto release;
1411 
1412 	cnf_size = er32(EXTCNF_SIZE);
1413 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1414 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1415 	if (!cnf_size)
1416 		goto release;
1417 
1418 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1419 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1420 
1421 	if (((hw->mac.type == e1000_pchlan) &&
1422 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1423 	    (hw->mac.type > e1000_pchlan)) {
1424 		/*
1425 		 * HW configures the SMBus address and LEDs when the
1426 		 * OEM and LCD Write Enable bits are set in the NVM.
1427 		 * When both NVM bits are cleared, SW will configure
1428 		 * them instead.
1429 		 */
1430 		ret_val = e1000_write_smbus_addr(hw);
1431 		if (ret_val)
1432 			goto release;
1433 
1434 		data = er32(LEDCTL);
1435 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1436 							(u16)data);
1437 		if (ret_val)
1438 			goto release;
1439 	}
1440 
1441 	/* Configure LCD from extended configuration region. */
1442 
1443 	/* cnf_base_addr is in DWORD */
1444 	word_addr = (u16)(cnf_base_addr << 1);
1445 
1446 	for (i = 0; i < cnf_size; i++) {
1447 		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
1448 					 &reg_data);
1449 		if (ret_val)
1450 			goto release;
1451 
1452 		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
1453 					 1, &reg_addr);
1454 		if (ret_val)
1455 			goto release;
1456 
1457 		/* Save off the PHY page for future writes. */
1458 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1459 			phy_page = reg_data;
1460 			continue;
1461 		}
1462 
1463 		reg_addr &= PHY_REG_MASK;
1464 		reg_addr |= phy_page;
1465 
1466 		ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
1467 		if (ret_val)
1468 			goto release;
1469 	}
1470 
1471 release:
1472 	hw->phy.ops.release(hw);
1473 	return ret_val;
1474 }
1475 
1476 /**
1477  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1478  *  @hw:   pointer to the HW structure
1479  *  @link: link up bool flag
1480  *
1481  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1482  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1483  *  If link is down, the function will restore the default K1 setting located
1484  *  in the NVM.
1485  **/
1486 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1487 {
1488 	s32 ret_val = 0;
1489 	u16 status_reg = 0;
1490 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1491 
1492 	if (hw->mac.type != e1000_pchlan)
1493 		return 0;
1494 
1495 	/* Wrap the whole flow with the sw flag */
1496 	ret_val = hw->phy.ops.acquire(hw);
1497 	if (ret_val)
1498 		return ret_val;
1499 
1500 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1501 	if (link) {
1502 		if (hw->phy.type == e1000_phy_82578) {
1503 			ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
1504 						  &status_reg);
1505 			if (ret_val)
1506 				goto release;
1507 
1508 			status_reg &= BM_CS_STATUS_LINK_UP |
1509 			              BM_CS_STATUS_RESOLVED |
1510 			              BM_CS_STATUS_SPEED_MASK;
1511 
1512 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1513 			                   BM_CS_STATUS_RESOLVED |
1514 			                   BM_CS_STATUS_SPEED_1000))
1515 				k1_enable = false;
1516 		}
1517 
1518 		if (hw->phy.type == e1000_phy_82577) {
1519 			ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
1520 			if (ret_val)
1521 				goto release;
1522 
1523 			status_reg &= HV_M_STATUS_LINK_UP |
1524 			              HV_M_STATUS_AUTONEG_COMPLETE |
1525 			              HV_M_STATUS_SPEED_MASK;
1526 
1527 			if (status_reg == (HV_M_STATUS_LINK_UP |
1528 			                   HV_M_STATUS_AUTONEG_COMPLETE |
1529 			                   HV_M_STATUS_SPEED_1000))
1530 				k1_enable = false;
1531 		}
1532 
1533 		/* Link stall fix for link up */
1534 		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
1535 		if (ret_val)
1536 			goto release;
1537 
1538 	} else {
1539 		/* Link stall fix for link down */
1540 		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
1541 		if (ret_val)
1542 			goto release;
1543 	}
1544 
1545 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1546 
1547 release:
1548 	hw->phy.ops.release(hw);
1549 
1550 	return ret_val;
1551 }
1552 
1553 /**
1554  *  e1000_configure_k1_ich8lan - Configure K1 power state
1555  *  @hw: pointer to the HW structure
1556  *  @k1_enable: K1 state to configure
1557  *
1558  *  Configure the K1 power state based on the provided parameter.
1559  *  Assumes semaphore already acquired.
1560  *
1561  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1562  **/
1563 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1564 {
1565 	s32 ret_val = 0;
1566 	u32 ctrl_reg = 0;
1567 	u32 ctrl_ext = 0;
1568 	u32 reg = 0;
1569 	u16 kmrn_reg = 0;
1570 
1571 	ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1572 					      &kmrn_reg);
1573 	if (ret_val)
1574 		return ret_val;
1575 
1576 	if (k1_enable)
1577 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1578 	else
1579 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1580 
1581 	ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1582 					       kmrn_reg);
1583 	if (ret_val)
1584 		return ret_val;
1585 
1586 	udelay(20);
1587 	ctrl_ext = er32(CTRL_EXT);
1588 	ctrl_reg = er32(CTRL);
1589 
1590 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1591 	reg |= E1000_CTRL_FRCSPD;
1592 	ew32(CTRL, reg);
1593 
1594 	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1595 	e1e_flush();
1596 	udelay(20);
1597 	ew32(CTRL, ctrl_reg);
1598 	ew32(CTRL_EXT, ctrl_ext);
1599 	e1e_flush();
1600 	udelay(20);
1601 
1602 	return 0;
1603 }
1604 
1605 /**
1606  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1607  *  @hw:       pointer to the HW structure
1608  *  @d0_state: boolean if entering d0 or d3 device state
1609  *
1610  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1611  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1612  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
1613  **/
1614 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1615 {
1616 	s32 ret_val = 0;
1617 	u32 mac_reg;
1618 	u16 oem_reg;
1619 
1620 	if (hw->mac.type < e1000_pchlan)
1621 		return ret_val;
1622 
1623 	ret_val = hw->phy.ops.acquire(hw);
1624 	if (ret_val)
1625 		return ret_val;
1626 
1627 	if (hw->mac.type == e1000_pchlan) {
1628 		mac_reg = er32(EXTCNF_CTRL);
1629 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1630 			goto release;
1631 	}
1632 
1633 	mac_reg = er32(FEXTNVM);
1634 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1635 		goto release;
1636 
1637 	mac_reg = er32(PHY_CTRL);
1638 
1639 	ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
1640 	if (ret_val)
1641 		goto release;
1642 
1643 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1644 
1645 	if (d0_state) {
1646 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1647 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1648 
1649 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1650 			oem_reg |= HV_OEM_BITS_LPLU;
1651 	} else {
1652 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1653 			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1654 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1655 
1656 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1657 			       E1000_PHY_CTRL_NOND0A_LPLU))
1658 			oem_reg |= HV_OEM_BITS_LPLU;
1659 	}
1660 
1661 	/* Set Restart auto-neg to activate the bits */
1662 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1663 	    !hw->phy.ops.check_reset_block(hw))
1664 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1665 
1666 	ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
1667 
1668 release:
1669 	hw->phy.ops.release(hw);
1670 
1671 	return ret_val;
1672 }
1673 
1674 
1675 /**
1676  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1677  *  @hw:   pointer to the HW structure
1678  **/
1679 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1680 {
1681 	s32 ret_val;
1682 	u16 data;
1683 
1684 	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
1685 	if (ret_val)
1686 		return ret_val;
1687 
1688 	data |= HV_KMRN_MDIO_SLOW;
1689 
1690 	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
1691 
1692 	return ret_val;
1693 }
1694 
1695 /**
1696  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1697  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
1698  **/
1699 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1700 {
1701 	s32 ret_val = 0;
1702 	u16 phy_data;
1703 
1704 	if (hw->mac.type != e1000_pchlan)
1705 		return 0;
1706 
1707 	/* Set MDIO slow mode before any other MDIO access */
1708 	if (hw->phy.type == e1000_phy_82577) {
1709 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
1710 		if (ret_val)
1711 			return ret_val;
1712 	}
1713 
1714 	if (((hw->phy.type == e1000_phy_82577) &&
1715 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1716 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1717 		/* Disable generation of early preamble */
1718 		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
1719 		if (ret_val)
1720 			return ret_val;
1721 
1722 		/* Preamble tuning for SSC */
1723 		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1724 		if (ret_val)
1725 			return ret_val;
1726 	}
1727 
1728 	if (hw->phy.type == e1000_phy_82578) {
1729 		/*
1730 		 * Return registers to default by doing a soft reset then
1731 		 * writing 0x3140 to the control register.
1732 		 */
1733 		if (hw->phy.revision < 2) {
1734 			e1000e_phy_sw_reset(hw);
1735 			ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
1736 		}
1737 	}
1738 
1739 	/* Select page 0 */
1740 	ret_val = hw->phy.ops.acquire(hw);
1741 	if (ret_val)
1742 		return ret_val;
1743 
1744 	hw->phy.addr = 1;
1745 	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1746 	hw->phy.ops.release(hw);
1747 	if (ret_val)
1748 		return ret_val;
1749 
1750 	/*
1751 	 * Configure the K1 Si workaround during PHY reset assuming there is
1752 	 * link so that it disables K1 if the link is at 1 Gbps.
1753 	 */
1754 	ret_val = e1000_k1_gig_workaround_hv(hw, true);
1755 	if (ret_val)
1756 		return ret_val;
1757 
1758 	/* Workaround for link disconnects on a busy hub in half duplex */
1759 	ret_val = hw->phy.ops.acquire(hw);
1760 	if (ret_val)
1761 		return ret_val;
1762 	ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1763 	if (ret_val)
1764 		goto release;
1765 	ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
1766 release:
1767 	hw->phy.ops.release(hw);
1768 
1769 	return ret_val;
1770 }
1771 
1772 /**
1773  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1774  *  @hw:   pointer to the HW structure
1775  **/
1776 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1777 {
1778 	u32 mac_reg;
1779 	u16 i, phy_reg = 0;
1780 	s32 ret_val;
1781 
1782 	ret_val = hw->phy.ops.acquire(hw);
1783 	if (ret_val)
1784 		return;
1785 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1786 	if (ret_val)
1787 		goto release;
1788 
1789 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1790 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1791 		mac_reg = er32(RAL(i));
1792 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1793 					   (u16)(mac_reg & 0xFFFF));
1794 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1795 					   (u16)((mac_reg >> 16) & 0xFFFF));
1796 
1797 		mac_reg = er32(RAH(i));
1798 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1799 					   (u16)(mac_reg & 0xFFFF));
1800 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1801 					   (u16)((mac_reg & E1000_RAH_AV)
1802 						 >> 16));
1803 	}
1804 
1805 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1806 
1807 release:
1808 	hw->phy.ops.release(hw);
1809 }
1810 
1811 /**
1812  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1813  *  with 82579 PHY
1814  *  @hw: pointer to the HW structure
1815  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
1816  **/
1817 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1818 {
1819 	s32 ret_val = 0;
1820 	u16 phy_reg, data;
1821 	u32 mac_reg;
1822 	u16 i;
1823 
1824 	if (hw->mac.type < e1000_pch2lan)
1825 		return 0;
1826 
1827 	/* disable Rx path while enabling/disabling workaround */
1828 	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1829 	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1830 	if (ret_val)
1831 		return ret_val;
1832 
1833 	if (enable) {
1834 		/*
1835 		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1836 		 * SHRAL/H) and initial CRC values to the MAC
1837 		 */
1838 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1839 			u8 mac_addr[ETH_ALEN] = {0};
1840 			u32 addr_high, addr_low;
1841 
1842 			addr_high = er32(RAH(i));
1843 			if (!(addr_high & E1000_RAH_AV))
1844 				continue;
1845 			addr_low = er32(RAL(i));
1846 			mac_addr[0] = (addr_low & 0xFF);
1847 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1848 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1849 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1850 			mac_addr[4] = (addr_high & 0xFF);
1851 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1852 
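			/* Seed PCH_RAICC with the inverted CRC of this receive address */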
1853 			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
1854 		}
1855 
1856 		/* Write Rx addresses to the PHY */
1857 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1858 
1859 		/* Enable jumbo frame workaround in the MAC */
1860 		mac_reg = er32(FFLT_DBG);
1861 		mac_reg &= ~(1 << 14);
1862 		mac_reg |= (7 << 15);
1863 		ew32(FFLT_DBG, mac_reg);
1864 
1865 		mac_reg = er32(RCTL);
1866 		mac_reg |= E1000_RCTL_SECRC;
1867 		ew32(RCTL, mac_reg);
1868 
1869 		ret_val = e1000e_read_kmrn_reg(hw,
1870 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1871 						&data);
1872 		if (ret_val)
1873 			return ret_val;
1874 		ret_val = e1000e_write_kmrn_reg(hw,
1875 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1876 						data | (1 << 0));
1877 		if (ret_val)
1878 			return ret_val;
1879 		ret_val = e1000e_read_kmrn_reg(hw,
1880 						E1000_KMRNCTRLSTA_HD_CTRL,
1881 						&data);
1882 		if (ret_val)
1883 			return ret_val;
1884 		data &= ~(0xF << 8);
1885 		data |= (0xB << 8);
1886 		ret_val = e1000e_write_kmrn_reg(hw,
1887 						E1000_KMRNCTRLSTA_HD_CTRL,
1888 						data);
1889 		if (ret_val)
1890 			return ret_val;
1891 
1892 		/* Enable jumbo frame workaround in the PHY */
1893 		e1e_rphy(hw, PHY_REG(769, 23), &data);
1894 		data &= ~(0x7F << 5);
1895 		data |= (0x37 << 5);
1896 		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1897 		if (ret_val)
1898 			return ret_val;
1899 		e1e_rphy(hw, PHY_REG(769, 16), &data);
1900 		data &= ~(1 << 13);
1901 		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1902 		if (ret_val)
1903 			return ret_val;
1904 		e1e_rphy(hw, PHY_REG(776, 20), &data);
1905 		data &= ~(0x3FF << 2);
1906 		data |= (0x1A << 2);
1907 		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1908 		if (ret_val)
1909 			return ret_val;
1910 		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
1911 		if (ret_val)
1912 			return ret_val;
1913 		e1e_rphy(hw, HV_PM_CTRL, &data);
1914 		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
1915 		if (ret_val)
1916 			return ret_val;
1917 	} else {
1918 		/* Write MAC register values back to h/w defaults */
1919 		mac_reg = er32(FFLT_DBG);
1920 		mac_reg &= ~(0xF << 14);
1921 		ew32(FFLT_DBG, mac_reg);
1922 
1923 		mac_reg = er32(RCTL);
1924 		mac_reg &= ~E1000_RCTL_SECRC;
1925 		ew32(RCTL, mac_reg);
1926 
1927 		ret_val = e1000e_read_kmrn_reg(hw,
1928 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1929 						&data);
1930 		if (ret_val)
1931 			return ret_val;
1932 		ret_val = e1000e_write_kmrn_reg(hw,
1933 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1934 						data & ~(1 << 0));
1935 		if (ret_val)
1936 			return ret_val;
1937 		ret_val = e1000e_read_kmrn_reg(hw,
1938 						E1000_KMRNCTRLSTA_HD_CTRL,
1939 						&data);
1940 		if (ret_val)
1941 			return ret_val;
1942 		data &= ~(0xF << 8);
1943 		data |= (0xB << 8);
1944 		ret_val = e1000e_write_kmrn_reg(hw,
1945 						E1000_KMRNCTRLSTA_HD_CTRL,
1946 						data);
1947 		if (ret_val)
1948 			return ret_val;
1949 
1950 		/* Write PHY register values back to h/w defaults */
1951 		e1e_rphy(hw, PHY_REG(769, 23), &data);
1952 		data &= ~(0x7F << 5);
1953 		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1954 		if (ret_val)
1955 			return ret_val;
1956 		e1e_rphy(hw, PHY_REG(769, 16), &data);
1957 		data |= (1 << 13);
1958 		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1959 		if (ret_val)
1960 			return ret_val;
1961 		e1e_rphy(hw, PHY_REG(776, 20), &data);
1962 		data &= ~(0x3FF << 2);
1963 		data |= (0x8 << 2);
1964 		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1965 		if (ret_val)
1966 			return ret_val;
1967 		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
1968 		if (ret_val)
1969 			return ret_val;
1970 		e1e_rphy(hw, HV_PM_CTRL, &data);
1971 		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
1972 		if (ret_val)
1973 			return ret_val;
1974 	}
1975 
1976 	/* re-enable Rx path after enabling/disabling workaround */
1977 	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1978 }
1979 
1980 /**
1981  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds done after every PHY reset
1982  *  @hw: pointer to the HW structure
1983  **/
1984 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1985 {
1986 	s32 ret_val = 0;
1987 
1988 	if (hw->mac.type != e1000_pch2lan)
1989 		return 0;
1990 
1991 	/* Set MDIO slow mode before any other MDIO access */
1992 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	if (ret_val)
		return ret_val;
1993 
1994 	ret_val = hw->phy.ops.acquire(hw);
1995 	if (ret_val)
1996 		return ret_val;
1997 	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
1998 	if (ret_val)
1999 		goto release;
2000 	/* set MSE higher to enable link to stay up when noise is high */
2001 	ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
2002 	if (ret_val)
2003 		goto release;
2004 	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
2005 	if (ret_val)
2006 		goto release;
2007 	/* drop the link after the MSE threshold has been reached 5 times */
2008 	ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
2009 release:
2010 	hw->phy.ops.release(hw);
2011 
2012 	return ret_val;
2013 }
2014 
2015 /**
2016  *  e1000_k1_workaround_lv - K1 Si workaround
2017  *  @hw:   pointer to the HW structure
2018  *
2019  *  Workaround to set the K1 beacon duration for 82579 parts
2020  **/
2021 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2022 {
2023 	s32 ret_val = 0;
2024 	u16 status_reg = 0;
2025 	u32 mac_reg;
2026 	u16 phy_reg;
2027 
2028 	if (hw->mac.type != e1000_pch2lan)
2029 		return 0;
2030 
2031 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
2032 	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2033 	if (ret_val)
2034 		return ret_val;
2035 
2036 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2037 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2038 		mac_reg = er32(FEXTNVM4);
2039 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2040 
2041 		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
2042 		if (ret_val)
2043 			return ret_val;
2044 
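		/* Use the 8us K1 beacon duration at 1 Gbps, 16us otherwise */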
2045 		if (status_reg & HV_M_STATUS_SPEED_1000) {
2046 			u16 pm_phy_reg;
2047 
2048 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2049 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2050 			/* LV 1G packet drop issue workaround */
2051 			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2052 			if (ret_val)
2053 				return ret_val;
2054 			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2055 			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2056 			if (ret_val)
2057 				return ret_val;
2058 		} else {
2059 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2060 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2061 		}
2062 		ew32(FEXTNVM4, mac_reg);
2063 		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
2064 	}
2065 
2066 	return ret_val;
2067 }
2068 
2069 /**
2070  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2071  *  @hw:   pointer to the HW structure
2072  *  @gate: boolean set to true to gate, false to ungate
2073  *
2074  *  Gate/ungate the automatic PHY configuration via hardware; perform
2075  *  the configuration via software instead.
2076  **/
2077 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2078 {
2079 	u32 extcnf_ctrl;
2080 
2081 	if (hw->mac.type < e1000_pch2lan)
2082 		return;
2083 
2084 	extcnf_ctrl = er32(EXTCNF_CTRL);
2085 
2086 	if (gate)
2087 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2088 	else
2089 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2090 
2091 	ew32(EXTCNF_CTRL, extcnf_ctrl);
2092 }
2093 
2094 /**
2095  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2096  *  @hw: pointer to the HW structure
2097  *
2098  *  Check for the appropriate indication that the MAC has finished configuring the
2099  *  PHY after a software reset.
2100  **/
2101 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2102 {
2103 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2104 
2105 	/* Wait for basic configuration to complete before proceeding */
2106 	do {
2107 		data = er32(STATUS);
2108 		data &= E1000_STATUS_LAN_INIT_DONE;
2109 		udelay(100);
2110 	} while ((!data) && --loop);
2111 
2112 	/*
2113 	 * If basic configuration is still incomplete when the above loop
2114 	 * count reaches 0, loading the configuration from NVM will
2115 	 * leave the PHY in a bad state possibly resulting in no link.
2116 	 */
2117 	if (loop == 0)
2118 		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2119 
2120 	/* Clear the Init Done bit for the next init event */
2121 	data = er32(STATUS);
2122 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2123 	ew32(STATUS, data);
2124 }
2125 
2126 /**
2127  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2128  *  @hw: pointer to the HW structure
2129  **/
2130 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2131 {
2132 	s32 ret_val = 0;
2133 	u16 reg;
2134 
2135 	if (hw->phy.ops.check_reset_block(hw))
2136 		return 0;
2137 
2138 	/* Allow time for h/w to get to quiescent state after reset */
2139 	usleep_range(10000, 20000);
2140 
2141 	/* Perform any necessary post-reset workarounds */
2142 	switch (hw->mac.type) {
2143 	case e1000_pchlan:
2144 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2145 		if (ret_val)
2146 			return ret_val;
2147 		break;
2148 	case e1000_pch2lan:
2149 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2150 		if (ret_val)
2151 			return ret_val;
2152 		break;
2153 	default:
2154 		break;
2155 	}
2156 
2157 	/* Clear the host wakeup bit after lcd reset */
2158 	if (hw->mac.type >= e1000_pchlan) {
2159 		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
2160 		reg &= ~BM_WUC_HOST_WU_BIT;
2161 		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
2162 	}
2163 
2164 	/* Configure the LCD with the extended configuration region in NVM */
2165 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2166 	if (ret_val)
2167 		return ret_val;
2168 
2169 	/* Configure the LCD with the OEM bits in NVM */
2170 	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2171 
2172 	if (hw->mac.type == e1000_pch2lan) {
2173 		/* Ungate automatic PHY configuration on non-managed 82579 */
2174 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
2175 			usleep_range(10000, 20000);
2176 			e1000_gate_hw_phy_config_ich8lan(hw, false);
2177 		}
2178 
2179 		/* Set EEE LPI Update Timer to 200usec */
2180 		ret_val = hw->phy.ops.acquire(hw);
2181 		if (ret_val)
2182 			return ret_val;
2183 		ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
2184 					  I82579_LPI_UPDATE_TIMER);
2185 		if (!ret_val)
2186 			ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
2187 		hw->phy.ops.release(hw);
2188 	}
2189 
2190 	return ret_val;
2191 }
2192 
2193 /**
2194  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2195  *  @hw: pointer to the HW structure
2196  *
2197  *  Resets the PHY
2198  *  This is a function pointer entry point called by drivers
2199  *  or other shared routines.
2200  **/
2201 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2202 {
2203 	s32 ret_val = 0;
2204 
2205 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2206 	if ((hw->mac.type == e1000_pch2lan) &&
2207 	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2208 		e1000_gate_hw_phy_config_ich8lan(hw, true);
2209 
2210 	ret_val = e1000e_phy_hw_reset_generic(hw);
2211 	if (ret_val)
2212 		return ret_val;
2213 
2214 	return e1000_post_phy_reset_ich8lan(hw);
2215 }
2216 
2217 /**
2218  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2219  *  @hw: pointer to the HW structure
2220  *  @active: true to enable LPLU, false to disable
2221  *
2222  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2223  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2224  *  the PHY speed. This function will manually set the LPLU bit and restart
2225  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2226  *  since it configures the same bit.
2227  **/
2228 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2229 {
2230 	s32 ret_val = 0;
2231 	u16 oem_reg;
2232 
2233 	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2234 	if (ret_val)
2235 		return ret_val;
2236 
2237 	if (active)
2238 		oem_reg |= HV_OEM_BITS_LPLU;
2239 	else
2240 		oem_reg &= ~HV_OEM_BITS_LPLU;
2241 
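	/* Restart auto-negotiation so the new LPLU setting takes effect */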
2242 	if (!hw->phy.ops.check_reset_block(hw))
2243 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2244 
2245 	return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2246 }
2247 
2248 /**
2249  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2250  *  @hw: pointer to the HW structure
2251  *  @active: true to enable LPLU, false to disable
2252  *
2253  *  Sets the LPLU D0 state according to the active flag.  When
2254  *  activating LPLU this function also disables smart speed
2255  *  and vice versa.  LPLU will not be activated unless the
2256  *  device autonegotiation advertisement meets standards of
2257  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2258  *  This is a function pointer entry point only called by
2259  *  PHY setup routines.
2260  **/
2261 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2262 {
2263 	struct e1000_phy_info *phy = &hw->phy;
2264 	u32 phy_ctrl;
2265 	s32 ret_val = 0;
2266 	u16 data;
2267 
2268 	if (phy->type == e1000_phy_ife)
2269 		return 0;
2270 
2271 	phy_ctrl = er32(PHY_CTRL);
2272 
2273 	if (active) {
2274 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2275 		ew32(PHY_CTRL, phy_ctrl);
2276 
2277 		if (phy->type != e1000_phy_igp_3)
2278 			return 0;
2279 
2280 		/*
2281 		 * Call gig speed drop workaround on LPLU before accessing
2282 		 * any PHY registers
2283 		 */
2284 		if (hw->mac.type == e1000_ich8lan)
2285 			e1000e_gig_downshift_workaround_ich8lan(hw);
2286 
2287 		/* When LPLU is enabled, we should disable SmartSpeed */
2288 		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;
2289 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2290 		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2291 		if (ret_val)
2292 			return ret_val;
2293 	} else {
2294 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2295 		ew32(PHY_CTRL, phy_ctrl);
2296 
2297 		if (phy->type != e1000_phy_igp_3)
2298 			return 0;
2299 
2300 		/*
2301 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2302 		 * during Dx states where the power conservation is most
2303 		 * important.  During driver activity we should enable
2304 		 * SmartSpeed, so performance is maintained.
2305 		 */
2306 		if (phy->smart_speed == e1000_smart_speed_on) {
2307 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2308 					   &data);
2309 			if (ret_val)
2310 				return ret_val;
2311 
2312 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2313 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2314 					   data);
2315 			if (ret_val)
2316 				return ret_val;
2317 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2318 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2319 					   &data);
2320 			if (ret_val)
2321 				return ret_val;
2322 
2323 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2324 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2325 					   data);
2326 			if (ret_val)
2327 				return ret_val;
2328 		}
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 /**
2335  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2336  *  @hw: pointer to the HW structure
2337  *  @active: true to enable LPLU, false to disable
2338  *
2339  *  Sets the LPLU D3 state according to the active flag.  When
2340  *  activating LPLU this function also disables smart speed
2341  *  and vice versa.  LPLU will not be activated unless the
2342  *  device autonegotiation advertisement meets standards of
2343  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2344  *  This is a function pointer entry point only called by
2345  *  PHY setup routines.
2346  **/
2347 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2348 {
2349 	struct e1000_phy_info *phy = &hw->phy;
2350 	u32 phy_ctrl;
2351 	s32 ret_val = 0;
2352 	u16 data;
2353 
2354 	phy_ctrl = er32(PHY_CTRL);
2355 
2356 	if (!active) {
2357 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2358 		ew32(PHY_CTRL, phy_ctrl);
2359 
2360 		if (phy->type != e1000_phy_igp_3)
2361 			return 0;
2362 
2363 		/*
2364 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2365 		 * during Dx states where the power conservation is most
2366 		 * important.  During driver activity we should enable
2367 		 * SmartSpeed, so performance is maintained.
2368 		 */
2369 		if (phy->smart_speed == e1000_smart_speed_on) {
2370 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2371 					   &data);
2372 			if (ret_val)
2373 				return ret_val;
2374 
2375 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2376 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2377 					   data);
2378 			if (ret_val)
2379 				return ret_val;
2380 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2381 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2382 					   &data);
2383 			if (ret_val)
2384 				return ret_val;
2385 
2386 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2387 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2388 					   data);
2389 			if (ret_val)
2390 				return ret_val;
2391 		}
2392 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2393 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2394 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2395 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2396 		ew32(PHY_CTRL, phy_ctrl);
2397 
2398 		if (phy->type != e1000_phy_igp_3)
2399 			return 0;
2400 
2401 		/*
2402 		 * Call gig speed drop workaround on LPLU before accessing
2403 		 * any PHY registers
2404 		 */
2405 		if (hw->mac.type == e1000_ich8lan)
2406 			e1000e_gig_downshift_workaround_ich8lan(hw);
2407 
2408 		/* When LPLU is enabled, we should disable SmartSpeed */
2409 		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2410 		if (ret_val)
2411 			return ret_val;
2412 
2413 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2414 		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2415 	}
2416 
2417 	return ret_val;
2418 }
2419 
2420 /**
2421  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2422  *  @hw: pointer to the HW structure
2423  *  @bank:  pointer to the variable that returns the active bank
2424  *
2425  *  Reads signature byte from the NVM using the flash access registers.
2426  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2427  **/
2428 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2429 {
2430 	u32 eecd;
2431 	struct e1000_nvm_info *nvm = &hw->nvm;
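	/*
	 * bank1_offset is the size of one bank in bytes; act_offset is the
	 * byte offset of the high byte of the bank signature word (0x13)
	 */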
2432 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2433 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2434 	u8 sig_byte = 0;
2435 	s32 ret_val;
2436 
2437 	switch (hw->mac.type) {
2438 	case e1000_ich8lan:
2439 	case e1000_ich9lan:
2440 		eecd = er32(EECD);
2441 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2442 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2443 			if (eecd & E1000_EECD_SEC1VAL)
2444 				*bank = 1;
2445 			else
2446 				*bank = 0;
2447 
2448 			return 0;
2449 		}
2450 		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2451 		/* fall-thru */
2452 	default:
2453 		/* set bank to 0 in case flash read fails */
2454 		*bank = 0;
2455 
2456 		/* Check bank 0 */
2457 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2458 		                                        &sig_byte);
2459 		if (ret_val)
2460 			return ret_val;
2461 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2462 		    E1000_ICH_NVM_SIG_VALUE) {
2463 			*bank = 0;
2464 			return 0;
2465 		}
2466 
2467 		/* Check bank 1 */
2468 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2469 		                                        bank1_offset,
2470 		                                        &sig_byte);
2471 		if (ret_val)
2472 			return ret_val;
2473 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2474 		    E1000_ICH_NVM_SIG_VALUE) {
2475 			*bank = 1;
2476 			return 0;
2477 		}
2478 
2479 		e_dbg("ERROR: No valid NVM bank present\n");
2480 		return -E1000_ERR_NVM;
2481 	}
2482 }
2483 
2484 /**
2485  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2486  *  @hw: pointer to the HW structure
2487  *  @offset: The offset (in words) of the word(s) to read.
2488  *  @words: Size of data to read in words
2489  *  @data: Pointer to the word(s) to read at offset.
2490  *
2491  *  Reads a word(s) from the NVM using the flash access registers.
2492  **/
2493 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2494 				  u16 *data)
2495 {
2496 	struct e1000_nvm_info *nvm = &hw->nvm;
2497 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2498 	u32 act_offset;
2499 	s32 ret_val = 0;
2500 	u32 bank = 0;
2501 	u16 i, word;
2502 
2503 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2504 	    (words == 0)) {
2505 		e_dbg("nvm parameter(s) out of bounds\n");
2506 		ret_val = -E1000_ERR_NVM;
2507 		goto out;
2508 	}
2509 
2510 	nvm->ops.acquire(hw);
2511 
2512 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2513 	if (ret_val) {
2514 		e_dbg("Could not detect valid bank, assuming bank 0\n");
2515 		bank = 0;
2516 	}
2517 
2518 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2519 	act_offset += offset;
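	/* act_offset is a word offset; the flash word reads below convert to bytes */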
2520 
2521 	ret_val = 0;
2522 	for (i = 0; i < words; i++) {
2523 		if (dev_spec->shadow_ram[offset+i].modified) {
2524 			data[i] = dev_spec->shadow_ram[offset+i].value;
2525 		} else {
2526 			ret_val = e1000_read_flash_word_ich8lan(hw,
2527 								act_offset + i,
2528 								&word);
2529 			if (ret_val)
2530 				break;
2531 			data[i] = word;
2532 		}
2533 	}
2534 
2535 	nvm->ops.release(hw);
2536 
2537 out:
2538 	if (ret_val)
2539 		e_dbg("NVM read error: %d\n", ret_val);
2540 
2541 	return ret_val;
2542 }
2543 
2544 /**
2545  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2546  *  @hw: pointer to the HW structure
2547  *
2548  *  This function does initial flash setup so that a new read/write/erase cycle
2549  *  can be started.
2550  **/
2551 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2552 {
2553 	union ich8_hws_flash_status hsfsts;
2554 	s32 ret_val = -E1000_ERR_NVM;
2555 
2556 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2557 
2558 	/* Check if the flash descriptor is valid */
2559 	if (!hsfsts.hsf_status.fldesvalid) {
2560 		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
2561 		return -E1000_ERR_NVM;
2562 	}
2563 
2564 	/* Clear FCERR and DAEL in hw status by writing 1 */
2565 	hsfsts.hsf_status.flcerr = 1;
2566 	hsfsts.hsf_status.dael = 1;
2567 
2568 	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2569 
2570 	/*
2571 	 * Either we should have a hardware SPI cycle-in-progress
2572 	 * bit to check against in order to start a new cycle, or
2573 	 * the FDONE bit should be set by a hardware reset so that
2574 	 * it is 1 afterwards, which can then be used as an
2575 	 * indication of whether a cycle is in progress or has
2576 	 * completed.
2577 	 */
2578 
2579 	if (!hsfsts.hsf_status.flcinprog) {
2580 		/*
2581 		 * There is no cycle running at present,
2582 		 * so we can start a cycle.
2583 		 * Begin by setting Flash Cycle Done.
2584 		 */
2585 		hsfsts.hsf_status.flcdone = 1;
2586 		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2587 		ret_val = 0;
2588 	} else {
2589 		s32 i;
2590 
2591 		/*
2592 		 * Otherwise poll for sometime so the current
2593 		 * cycle has a chance to end before giving up.
2594 		 */
2595 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2596 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2597 			if (!hsfsts.hsf_status.flcinprog) {
2598 				ret_val = 0;
2599 				break;
2600 			}
2601 			udelay(1);
2602 		}
2603 		if (!ret_val) {
2604 			/*
2605 			 * The previous cycle has completed, so now
2606 			 * set the Flash Cycle Done bit.
2607 			 */
2608 			hsfsts.hsf_status.flcdone = 1;
2609 			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2610 		} else {
2611 			e_dbg("Flash controller busy, cannot get access\n");
2612 		}
2613 	}
2614 
2615 	return ret_val;
2616 }
2617 
2618 /**
2619  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2620  *  @hw: pointer to the HW structure
2621  *  @timeout: maximum time to wait for completion
2622  *
2623  *  This function starts a flash cycle and waits for its completion.
2624  **/
2625 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2626 {
2627 	union ich8_hws_flash_ctrl hsflctl;
2628 	union ich8_hws_flash_status hsfsts;
2629 	u32 i = 0;
2630 
2631 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2632 	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2633 	hsflctl.hsf_ctrl.flcgo = 1;
2634 	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2635 
2636 	/* wait till FDONE bit is set to 1 */
2637 	do {
2638 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2639 		if (hsfsts.hsf_status.flcdone)
2640 			break;
2641 		udelay(1);
2642 	} while (i++ < timeout);
2643 
2644 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2645 		return 0;
2646 
2647 	return -E1000_ERR_NVM;
2648 }
2649 
2650 /**
2651  *  e1000_read_flash_word_ich8lan - Read word from flash
2652  *  @hw: pointer to the HW structure
2653  *  @offset: offset to data location
2654  *  @data: pointer to the location for storing the data
2655  *
2656  *  Reads the flash word at offset into data.  Offset is converted
2657  *  to bytes before read.
2658  **/
2659 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2660 					 u16 *data)
2661 {
2662 	/* Must convert offset into bytes. */
2663 	offset <<= 1;
2664 
2665 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2666 }
2667 
2668 /**
2669  *  e1000_read_flash_byte_ich8lan - Read byte from flash
2670  *  @hw: pointer to the HW structure
2671  *  @offset: The offset of the byte to read.
2672  *  @data: Pointer to a byte to store the value read.
2673  *
2674  *  Reads a single byte from the NVM using the flash access registers.
2675  **/
2676 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2677 					 u8 *data)
2678 {
2679 	s32 ret_val;
2680 	u16 word = 0;
2681 
2682 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2683 	if (ret_val)
2684 		return ret_val;
2685 
2686 	*data = (u8)word;
2687 
2688 	return 0;
2689 }
2690 
2691 /**
2692  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
2693  *  @hw: pointer to the HW structure
2694  *  @offset: The offset (in bytes) of the byte or word to read.
2695  *  @size: Size of data to read, 1=byte 2=word
2696  *  @data: Pointer to the word to store the value read.
2697  *
2698  *  Reads a byte or word from the NVM using the flash access registers.
2699  **/
2700 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2701 					 u8 size, u16 *data)
2702 {
2703 	union ich8_hws_flash_status hsfsts;
2704 	union ich8_hws_flash_ctrl hsflctl;
2705 	u32 flash_linear_addr;
2706 	u32 flash_data = 0;
2707 	s32 ret_val = -E1000_ERR_NVM;
2708 	u8 count = 0;
2709 
2710 	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2711 		return -E1000_ERR_NVM;
2712 
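	/* Compute the linear flash address of the byte/word to read */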
2713 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2714 			    hw->nvm.flash_base_addr;
2715 
2716 	do {
2717 		udelay(1);
2718 		/* Steps */
2719 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2720 		if (ret_val)
2721 			break;
2722 
2723 		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2724 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2725 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2726 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2727 		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2728 
2729 		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2730 
2731 		ret_val = e1000_flash_cycle_ich8lan(hw,
2732 						ICH_FLASH_READ_COMMAND_TIMEOUT);
2733 
2734 		/*
2735 		 * If FCERR is set, clear it and retry the whole
2736 		 * sequence a few more times; otherwise read in the
2737 		 * Flash Data0 register, least significant byte
2738 		 * first.
2739 		 */
2740 		if (!ret_val) {
2741 			flash_data = er32flash(ICH_FLASH_FDATA0);
2742 			if (size == 1)
2743 				*data = (u8)(flash_data & 0x000000FF);
2744 			else if (size == 2)
2745 				*data = (u16)(flash_data & 0x0000FFFF);
2746 			break;
2747 		} else {
2748 			/*
2749 			 * If we've gotten here, then things are probably
2750 			 * completely hosed, but if the error condition is
2751 			 * detected, it won't hurt to give it another try...
2752 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2753 			 */
2754 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2755 			if (hsfsts.hsf_status.flcerr) {
2756 				/* Repeat for some time before giving up. */
2757 				continue;
2758 			} else if (!hsfsts.hsf_status.flcdone) {
2759 				e_dbg("Timeout error - flash cycle did not complete.\n");
2760 				break;
2761 			}
2762 		}
2763 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2764 
2765 	return ret_val;
2766 }
2767 
2768 /**
2769  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
2770  *  @hw: pointer to the HW structure
2771  *  @offset: The offset (in words) of the word(s) to write.
2772  *  @words: Size of data to write in words
2773  *  @data: Pointer to the word(s) to write at offset.
2774  *
2775  *  Writes word(s) to the shadow RAM copy of the NVM; the data is committed to the flash when the NVM checksum is updated.
2776  **/
2777 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2778 				   u16 *data)
2779 {
2780 	struct e1000_nvm_info *nvm = &hw->nvm;
2781 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2782 	u16 i;
2783 
2784 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2785 	    (words == 0)) {
2786 		e_dbg("nvm parameter(s) out of bounds\n");
2787 		return -E1000_ERR_NVM;
2788 	}
2789 
2790 	nvm->ops.acquire(hw);
2791 
2792 	for (i = 0; i < words; i++) {
2793 		dev_spec->shadow_ram[offset+i].modified = true;
2794 		dev_spec->shadow_ram[offset+i].value = data[i];
2795 	}
2796 
2797 	nvm->ops.release(hw);
2798 
2799 	return 0;
2800 }
2801 
2802 /**
2803  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2804  *  @hw: pointer to the HW structure
2805  *
2806  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
2807  *  which writes the checksum to the shadow ram.  The changes in the shadow
2808  *  ram are then committed to the EEPROM by processing each bank at a time
2809  *  checking for the modified bit and writing only the pending changes.
2810  *  After a successful commit, the shadow ram is cleared and is ready for
2811  *  future writes.
2812  **/
2813 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2814 {
2815 	struct e1000_nvm_info *nvm = &hw->nvm;
2816 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2817 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2818 	s32 ret_val;
2819 	u16 data;
2820 
2821 	ret_val = e1000e_update_nvm_checksum_generic(hw);
2822 	if (ret_val)
2823 		goto out;
2824 
2825 	if (nvm->type != e1000_nvm_flash_sw)
2826 		goto out;
2827 
2828 	nvm->ops.acquire(hw);
2829 
2830 	/*
2831 	 * We're writing to the opposite bank so if we're on bank 1,
2832 	 * write to bank 0 etc.  We also need to erase the segment that
2833 	 * is going to be written
2834 	 */
2835 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2836 	if (ret_val) {
2837 		e_dbg("Could not detect valid bank, assuming bank 0\n");
2838 		bank = 0;
2839 	}
2840 
2841 	if (bank == 0) {
2842 		new_bank_offset = nvm->flash_bank_size;
2843 		old_bank_offset = 0;
2844 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2845 		if (ret_val)
2846 			goto release;
2847 	} else {
2848 		old_bank_offset = nvm->flash_bank_size;
2849 		new_bank_offset = 0;
2850 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2851 		if (ret_val)
2852 			goto release;
2853 	}
2854 
2855 	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2856 		/*
2857 		 * Determine whether to write the value stored
2858 		 * in the other NVM bank or a modified value stored
2859 		 * in the shadow RAM
2860 		 */
2861 		if (dev_spec->shadow_ram[i].modified) {
2862 			data = dev_spec->shadow_ram[i].value;
2863 		} else {
2864 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
2865 			                                        old_bank_offset,
2866 			                                        &data);
2867 			if (ret_val)
2868 				break;
2869 		}
2870 
2871 		/*
2872 		 * If the word is 0x13, then make sure the signature bits
2873 		 * (15:14) are 11b until the commit has completed.
2874 		 * This will allow us to write 10b which indicates the
2875 		 * signature is valid.  We want to do this after the write
2876 		 * has completed so that we don't mark the segment valid
2877 		 * while the write is still in progress
2878 		 */
2879 		if (i == E1000_ICH_NVM_SIG_WORD)
2880 			data |= E1000_ICH_NVM_SIG_MASK;
2881 
2882 		/* Convert offset to bytes. */
2883 		act_offset = (i + new_bank_offset) << 1;
2884 
2885 		udelay(100);
2886 		/* Write the bytes to the new bank. */
2887 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2888 							       act_offset,
2889 							       (u8)data);
2890 		if (ret_val)
2891 			break;
2892 
2893 		udelay(100);
2894 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2895 							  act_offset + 1,
2896 							  (u8)(data >> 8));
2897 		if (ret_val)
2898 			break;
2899 	}
2900 
2901 	/*
2902 	 * Don't bother writing the segment valid bits if sector
2903 	 * programming failed.
2904 	 */
2905 	if (ret_val) {
2906 		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
2907 		e_dbg("Flash commit failed.\n");
2908 		goto release;
2909 	}
2910 
2911 	/*
2912 	 * Finally validate the new segment by setting bits 15:14
2913 	 * to 10b in word 0x13.  This can be done without an
2914 	 * erase because these bits are 11 to start with and we
2915 	 * only need to change bit 14 to 0b.
2916 	 */
2917 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2918 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2919 	if (ret_val)
2920 		goto release;
2921 
2922 	data &= 0xBFFF;
2923 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2924 						       act_offset * 2 + 1,
2925 						       (u8)(data >> 8));
2926 	if (ret_val)
2927 		goto release;
2928 
2929 	/*
2930 	 * And invalidate the previously valid segment by setting
2931 	 * its signature word (0x13) high byte to 0. This can be
2932 	 * done without an erase because flash erase sets all bits
2933 	 * to 1's. We can write 1's to 0's without an erase
2934 	 */
2935 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2936 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2937 	if (ret_val)
2938 		goto release;
2939 
2940 	/* Great!  Everything worked, we can now clear the cached entries. */
2941 	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2942 		dev_spec->shadow_ram[i].modified = false;
2943 		dev_spec->shadow_ram[i].value = 0xFFFF;
2944 	}
2945 
2946 release:
2947 	nvm->ops.release(hw);
2948 
2949 	/*
2950 	 * Reload the EEPROM, or else modifications will not appear
2951 	 * until after the next adapter reset.
2952 	 */
2953 	if (!ret_val) {
2954 		nvm->ops.reload(hw);
2955 		usleep_range(10000, 20000);
2956 	}
2957 
2958 out:
2959 	if (ret_val)
2960 		e_dbg("NVM update error: %d\n", ret_val);
2961 
2962 	return ret_val;
2963 }
2964 
2965 /**
2966  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2967  *  @hw: pointer to the HW structure
2968  *
2969  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2970  *  If the bit is 0, the EEPROM may have been modified but the checksum was not
2971  *  recalculated, in which case we need to calculate the checksum and set bit 6.
2972  **/
2973 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2974 {
2975 	s32 ret_val;
2976 	u16 data;
2977 
2978 	/*
2979 	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
2980 	 * needs to be fixed.  This bit is an indication that the NVM
2981 	 * was prepared by OEM software and did not calculate the
2982 	 * checksum...a likely scenario.
2983 	 */
2984 	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
2985 	if (ret_val)
2986 		return ret_val;
2987 
2988 	if (!(data & 0x40)) {
2989 		data |= 0x40;
2990 		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2991 		if (ret_val)
2992 			return ret_val;
2993 		ret_val = e1000e_update_nvm_checksum(hw);
2994 		if (ret_val)
2995 			return ret_val;
2996 	}
2997 
2998 	return e1000e_validate_nvm_checksum_generic(hw);
2999 }
3000 
3001 /**
3002  *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
3003  *  @hw: pointer to the HW structure
3004  *
3005  *  To prevent malicious write/erase of the NVM, set it to be read-only
3006  *  so that the hardware ignores all write/erase cycles of the NVM via
3007  *  the flash control registers.  The shadow-ram copy of the NVM will
3008  *  still be updated, however any updates to this copy will not stick
3009  *  across driver reloads.
3010  **/
3011 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
3012 {
3013 	struct e1000_nvm_info *nvm = &hw->nvm;
3014 	union ich8_flash_protected_range pr0;
3015 	union ich8_hws_flash_status hsfsts;
3016 	u32 gfpreg;
3017 
3018 	nvm->ops.acquire(hw);
3019 
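	/*
	 * GFPREG holds the GbE flash region base (bits 12:0) and limit
	 * (bits 28:16); mirror them into protected range register PR0.
	 */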
3020 	gfpreg = er32flash(ICH_FLASH_GFPREG);
3021 
3022 	/* Write-protect GbE Sector of NVM */
3023 	pr0.regval = er32flash(ICH_FLASH_PR0);
3024 	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
3025 	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
3026 	pr0.range.wpe = true;
3027 	ew32flash(ICH_FLASH_PR0, pr0.regval);
3028 
3029 	/*
3030 	 * Lock down a subset of GbE Flash Control Registers, e.g.
3031 	 * PR0 to prevent the write-protection from being lifted.
3032 	 * Once FLOCKDN is set, the registers protected by it cannot
3033 	 * be written until FLOCKDN is cleared by a hardware reset.
3034 	 */
3035 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3036 	hsfsts.hsf_status.flockdn = true;
3037 	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3038 
3039 	nvm->ops.release(hw);
3040 }
3041 
3042 /**
3043  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3044  *  @hw: pointer to the HW structure
3045  *  @offset: The offset (in bytes) of the byte/word to read.
3046  *  @size: Size of data to read, 1=byte 2=word
3047  *  @data: The byte(s) to write to the NVM.
3048  *
3049  *  Writes one/two bytes to the NVM using the flash access registers.
3050  **/
3051 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3052 					  u8 size, u16 data)
3053 {
3054 	union ich8_hws_flash_status hsfsts;
3055 	union ich8_hws_flash_ctrl hsflctl;
3056 	u32 flash_linear_addr;
3057 	u32 flash_data = 0;
3058 	s32 ret_val;
3059 	u8 count = 0;
3060 
3061 	if (size < 1 || size > 2 || data > size * 0xff ||
3062 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
3063 		return -E1000_ERR_NVM;
3064 
3065 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3066 			    hw->nvm.flash_base_addr;
3067 
3068 	do {
3069 		udelay(1);
3070 		/* Steps */
3071 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3072 		if (ret_val)
3073 			break;
3074 
3075 		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3076 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3077 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3078 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3079 		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3080 
3081 		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3082 
3083 		if (size == 1)
3084 			flash_data = (u32)data & 0x00FF;
3085 		else
3086 			flash_data = (u32)data;
3087 
3088 		ew32flash(ICH_FLASH_FDATA0, flash_data);
3089 
3090 		/*
3091 		 * If FCERR is set, clear it and retry the whole sequence a
3092 		 * few more times; otherwise we are done.
3093 		 */
3094 		ret_val = e1000_flash_cycle_ich8lan(hw,
3095 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3096 		if (!ret_val)
3097 			break;
3098 
3099 		/*
3100 		 * If we're here, then things are most likely
3101 		 * completely hosed, but if the error condition
3102 		 * is detected, it won't hurt to give it another
3103 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3104 		 */
3105 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3106 		if (hsfsts.hsf_status.flcerr)
3107 			/* Repeat for some time before giving up. */
3108 			continue;
3109 		if (!hsfsts.hsf_status.flcdone) {
3110 			e_dbg("Timeout error - flash cycle did not complete.\n");
3111 			break;
3112 		}
3113 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3114 
3115 	return ret_val;
3116 }
3117 
3118 /**
3119  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3120  *  @hw: pointer to the HW structure
3121  *  @offset: The index of the byte to read.
3122  *  @data: The byte to write to the NVM.
3123  *
3124  *  Writes a single byte to the NVM using the flash access registers.
3125  **/
3126 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3127 					  u8 data)
3128 {
3129 	u16 word = (u16)data;
3130 
3131 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3132 }
3133 
3134 /**
3135  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3136  *  @hw: pointer to the HW structure
3137  *  @offset: The offset of the byte to write.
3138  *  @byte: The byte to write to the NVM.
3139  *
3140  *  Writes a single byte to the NVM using the flash access registers.
3141  *  Goes through a retry algorithm before giving up.
3142  **/
3143 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3144 						u32 offset, u8 byte)
3145 {
3146 	s32 ret_val;
3147 	u16 program_retries;
3148 
3149 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3150 	if (!ret_val)
3151 		return ret_val;
3152 
3153 	for (program_retries = 0; program_retries < 100; program_retries++) {
3154 		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
3155 		udelay(100);
3156 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3157 		if (!ret_val)
3158 			break;
3159 	}
3160 	if (program_retries == 100)
3161 		return -E1000_ERR_NVM;
3162 
3163 	return 0;
3164 }
3165 
3166 /**
3167  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3168  *  @hw: pointer to the HW structure
3169  *  @bank: 0 for first bank, 1 for second bank, etc.
3170  *
3171  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3172  *  bank N is 4096 * N + flash_reg_addr.
3173  **/
3174 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3175 {
3176 	struct e1000_nvm_info *nvm = &hw->nvm;
3177 	union ich8_hws_flash_status hsfsts;
3178 	union ich8_hws_flash_ctrl hsflctl;
3179 	u32 flash_linear_addr;
3180 	/* bank size is in 16bit words - adjust to bytes */
3181 	u32 flash_bank_size = nvm->flash_bank_size * 2;
3182 	s32 ret_val;
3183 	s32 count = 0;
3184 	s32 j, iteration, sector_size;
3185 
3186 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3187 
3188 	/*
3189 	 * Determine HW Sector size: Read BERASE bits of hw flash status
3190 	 * register
3191 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3192 	 *     consecutive sectors.  The start index for the nth Hw sector
3193 	 *     can be calculated as = bank * 4096 + n * 256
3194 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3195 	 *     The start index for the nth Hw sector can be calculated
3196 	 *     as = bank * 4096
3197 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3198 	 *     (ich9 only, otherwise error condition)
3199 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3200 	 */
3201 	switch (hsfsts.hsf_status.berasesz) {
3202 	case 0:
3203 		/* Hw sector size 256 */
3204 		sector_size = ICH_FLASH_SEG_SIZE_256;
3205 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3206 		break;
3207 	case 1:
3208 		sector_size = ICH_FLASH_SEG_SIZE_4K;
3209 		iteration = 1;
3210 		break;
3211 	case 2:
3212 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3213 		iteration = 1;
3214 		break;
3215 	case 3:
3216 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3217 		iteration = 1;
3218 		break;
3219 	default:
3220 		return -E1000_ERR_NVM;
3221 	}
3222 
3223 	/* Start with the base address, then add the sector offset. */
3224 	flash_linear_addr = hw->nvm.flash_base_addr;
3225 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3226 
3227 	for (j = 0; j < iteration ; j++) {
3228 		do {
3229 			/* Steps */
3230 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3231 			if (ret_val)
3232 				return ret_val;
3233 
3234 			/*
3235 			 * Write a value 11 (block Erase) in Flash
3236 			 * Cycle field in hw flash control
3237 			 */
3238 			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3239 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3240 			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3241 
3242 			/*
3243 			 * Write the last 24 bits of an index within the
3244 			 * block into Flash Linear address field in Flash
3245 			 * Address.
3246 			 */
3247 			flash_linear_addr += (j * sector_size);
3248 			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3249 
3250 			ret_val = e1000_flash_cycle_ich8lan(hw,
3251 					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3252 			if (!ret_val)
3253 				break;
3254 
3255 			 * If FCERR is set, clear it and retry the
3256 			 * whole sequence a few more times; otherwise
3257 			 * we are done.
3258 			 * a few more times else Done
3259 			 */
3260 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3261 			if (hsfsts.hsf_status.flcerr)
3262 				/* repeat for some time before giving up */
3263 				continue;
3264 			else if (!hsfsts.hsf_status.flcdone)
3265 				return ret_val;
3266 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3267 	}
3268 
3269 	return 0;
3270 }
3271 
3272 /**
3273  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3274  *  @hw: pointer to the HW structure
3275  *  @data: Pointer to the LED settings
3276  *
3277  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3278  *  settings is all 0's or F's, set the LED default to a valid LED default
3279  *  setting.
3280  **/
3281 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3282 {
3283 	s32 ret_val;
3284 
3285 	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
3286 	if (ret_val) {
3287 		e_dbg("NVM Read Error\n");
3288 		return ret_val;
3289 	}
3290 
3291 	if (*data == ID_LED_RESERVED_0000 ||
3292 	    *data == ID_LED_RESERVED_FFFF)
3293 		*data = ID_LED_DEFAULT_ICH8LAN;
3294 
3295 	return 0;
3296 }
3297 
3298 /**
3299  *  e1000_id_led_init_pchlan - store LED configurations
3300  *  @hw: pointer to the HW structure
3301  *
3302  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3303  *  the PHY LED configuration register.
3304  *
3305  *  PCH also does not have an "always on" or "always off" mode which
3306  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3307  *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
3308  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3309  *  link based on logic in e1000_led_[on|off]_pchlan().
3310  **/
3311 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3312 {
3313 	struct e1000_mac_info *mac = &hw->mac;
3314 	s32 ret_val;
3315 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3316 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3317 	u16 data, i, temp, shift;
3318 
3319 	/* Get default ID LED modes */
3320 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3321 	if (ret_val)
3322 		return ret_val;
3323 
3324 	mac->ledctl_default = er32(LEDCTL);
3325 	mac->ledctl_mode1 = mac->ledctl_default;
3326 	mac->ledctl_mode2 = mac->ledctl_default;
3327 
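	/*
	 * Each 4-bit field of the NVM default word selects the mode for one
	 * LED; the PHY LED configuration uses a 5-bit field per LED.
	 */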
3328 	for (i = 0; i < 4; i++) {
3329 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3330 		shift = (i * 5);
3331 		switch (temp) {
3332 		case ID_LED_ON1_DEF2:
3333 		case ID_LED_ON1_ON2:
3334 		case ID_LED_ON1_OFF2:
3335 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3336 			mac->ledctl_mode1 |= (ledctl_on << shift);
3337 			break;
3338 		case ID_LED_OFF1_DEF2:
3339 		case ID_LED_OFF1_ON2:
3340 		case ID_LED_OFF1_OFF2:
3341 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3342 			mac->ledctl_mode1 |= (ledctl_off << shift);
3343 			break;
3344 		default:
3345 			/* Do nothing */
3346 			break;
3347 		}
3348 		switch (temp) {
3349 		case ID_LED_DEF1_ON2:
3350 		case ID_LED_ON1_ON2:
3351 		case ID_LED_OFF1_ON2:
3352 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3353 			mac->ledctl_mode2 |= (ledctl_on << shift);
3354 			break;
3355 		case ID_LED_DEF1_OFF2:
3356 		case ID_LED_ON1_OFF2:
3357 		case ID_LED_OFF1_OFF2:
3358 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3359 			mac->ledctl_mode2 |= (ledctl_off << shift);
3360 			break;
3361 		default:
3362 			/* Do nothing */
3363 			break;
3364 		}
3365 	}
3366 
3367 	return 0;
3368 }
3369 
3370 /**
3371  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3372  *  @hw: pointer to the HW structure
3373  *
3374  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3375  *  register, so the bus width is hardcoded.
3376  **/
3377 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3378 {
3379 	struct e1000_bus_info *bus = &hw->bus;
3380 	s32 ret_val;
3381 
3382 	ret_val = e1000e_get_bus_info_pcie(hw);
3383 
3384 	/*
3385 	 * ICH devices are "PCI Express"-ish.  They have
3386 	 * a configuration space, but do not contain
3387 	 * PCI Express Capability registers, so bus width
3388 	 * must be hardcoded.
3389 	 */
3390 	if (bus->width == e1000_bus_width_unknown)
3391 		bus->width = e1000_bus_width_pcie_x1;
3392 
3393 	return ret_val;
3394 }
3395 
3396 /**
3397  *  e1000_reset_hw_ich8lan - Reset the hardware
3398  *  @hw: pointer to the HW structure
3399  *
3400  *  Does a full reset of the hardware which includes a reset of the PHY and
3401  *  MAC.
3402  **/
3403 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3404 {
3405 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3406 	u16 kum_cfg;
3407 	u32 ctrl, reg;
3408 	s32 ret_val;
3409 
3410 	/*
3411 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
3412 	 * on the last TLP read/write transaction when MAC is reset.
3413 	 */
3414 	ret_val = e1000e_disable_pcie_master(hw);
3415 	if (ret_val)
3416 		e_dbg("PCI-E Master disable polling has failed.\n");
3417 
3418 	e_dbg("Masking off all interrupts\n");
3419 	ew32(IMC, 0xffffffff);
3420 
3421 	/*
3422 	 * Disable the Transmit and Receive units.  Then delay to allow
3423 	 * any pending transactions to complete before we hit the MAC
3424 	 * with the global reset.
3425 	 */
3426 	ew32(RCTL, 0);
3427 	ew32(TCTL, E1000_TCTL_PSP);
3428 	e1e_flush();
3429 
3430 	usleep_range(10000, 20000);
3431 
3432 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3433 	if (hw->mac.type == e1000_ich8lan) {
3434 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3435 		ew32(PBA, E1000_PBA_8K);
3436 		/* Set Packet Buffer Size to 16k. */
3437 		ew32(PBS, E1000_PBS_16K);
3438 	}
3439 
3440 	if (hw->mac.type == e1000_pchlan) {
3441 		/* Save the NVM K1 bit setting */
3442 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3443 		if (ret_val)
3444 			return ret_val;
3445 
3446 		if (kum_cfg & E1000_NVM_K1_ENABLE)
3447 			dev_spec->nvm_k1_enabled = true;
3448 		else
3449 			dev_spec->nvm_k1_enabled = false;
3450 	}
3451 
3452 	ctrl = er32(CTRL);
3453 
3454 	if (!hw->phy.ops.check_reset_block(hw)) {
3455 		/*
3456 		 * Full-chip reset requires MAC and PHY reset at the same
3457 		 * time to make sure the interface between MAC and the
3458 		 * external PHY is reset.
3459 		 */
3460 		ctrl |= E1000_CTRL_PHY_RST;
3461 
3462 		/*
3463 		 * Gate automatic PHY configuration by hardware on
3464 		 * non-managed 82579
3465 		 */
3466 		if ((hw->mac.type == e1000_pch2lan) &&
3467 		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3468 			e1000_gate_hw_phy_config_ich8lan(hw, true);
3469 	}
3470 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3471 	e_dbg("Issuing a global reset to ich8lan\n");
3472 	ew32(CTRL, (ctrl | E1000_CTRL_RST));
3473 	/* cannot issue a flush here because it hangs the hardware */
3474 	msleep(20);
3475 
3476 	/* Set Phy Config Counter to 50msec */
3477 	if (hw->mac.type == e1000_pch2lan) {
3478 		reg = er32(FEXTNVM3);
3479 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3480 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3481 		ew32(FEXTNVM3, reg);
3482 	}
3483 
3484 	if (!ret_val)
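	/*
	 * The global reset also cleared the semaphore register, so just
	 * drop the driver's local access flag instead of calling release.
	 */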
3485 		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
3486 
3487 	if (ctrl & E1000_CTRL_PHY_RST) {
3488 		ret_val = hw->phy.ops.get_cfg_done(hw);
3489 		if (ret_val)
3490 			return ret_val;
3491 
3492 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3493 		if (ret_val)
3494 			return ret_val;
3495 	}
3496 
3497 	/*
3498 	 * For PCH, this write will make sure that any noise
3499 	 * will be detected as a CRC error and be dropped rather than show up
3500 	 * as a bad packet to the DMA engine.
3501 	 */
3502 	if (hw->mac.type == e1000_pchlan)
3503 		ew32(CRC_OFFSET, 0x65656565);
3504 
3505 	ew32(IMC, 0xffffffff);
3506 	er32(ICR);
3507 
3508 	reg = er32(KABGTXD);
3509 	reg |= E1000_KABGTXD_BGSQLBIAS;
3510 	ew32(KABGTXD, reg);
3511 
3512 	return 0;
3513 }
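
/*
 * Editor's illustrative sketch (not part of the driver): callers reach
 * e1000_reset_hw_ich8lan() and e1000_init_hw_ich8lan() through the
 * mac.ops function-pointer table (see ich8_mac_ops below), so a typical
 * re-initialization path looks roughly like this.  The wrapper name is
 * hypothetical.
 */
static s32 __maybe_unused example_reinit_hw(struct e1000_hw *hw)
{
	s32 ret_val;

	/* Full MAC/PHY reset first ... */
	ret_val = hw->mac.ops.reset_hw(hw);
	if (ret_val)
		return ret_val;

	/* ... then bring the MAC back up for Tx/Rx. */
	return hw->mac.ops.init_hw(hw);
}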
3514 
3515 /**
3516  *  e1000_init_hw_ich8lan - Initialize the hardware
3517  *  @hw: pointer to the HW structure
3518  *
3519  *  Prepares the hardware for transmit and receive by doing the following:
3520  *   - initialize hardware bits
3521  *   - initialize LED identification
3522  *   - setup receive address registers
3523  *   - setup flow control
3524  *   - setup transmit descriptors
3525  *   - clear statistics
3526  **/
3527 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3528 {
3529 	struct e1000_mac_info *mac = &hw->mac;
3530 	u32 ctrl_ext, txdctl, snoop;
3531 	s32 ret_val;
3532 	u16 i;
3533 
3534 	e1000_initialize_hw_bits_ich8lan(hw);
3535 
3536 	/* Initialize identification LED */
3537 	ret_val = mac->ops.id_led_init(hw);
3538 	if (ret_val)
3539 		e_dbg("Error initializing identification LED\n");
3540 		/* This is not fatal and we should not stop init due to this */
3541 
3542 	/* Setup the receive address. */
3543 	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
3544 
3545 	/* Zero out the Multicast HASH table */
3546 	e_dbg("Zeroing the MTA\n");
3547 	for (i = 0; i < mac->mta_reg_count; i++)
3548 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3549 
3550 	/*
3551 	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3552 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
3553 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3554 	 */
3555 	if (hw->phy.type == e1000_phy_82578) {
3556 		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
3557 		i &= ~BM_WUC_HOST_WU_BIT;
3558 		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
3559 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3560 		if (ret_val)
3561 			return ret_val;
3562 	}
3563 
3564 	/* Setup link and flow control */
3565 	ret_val = mac->ops.setup_link(hw);
3566 
3567 	/* Set the transmit descriptor write-back policy for both queues */
3568 	txdctl = er32(TXDCTL(0));
3569 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3570 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3571 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3572 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3573 	ew32(TXDCTL(0), txdctl);
3574 	txdctl = er32(TXDCTL(1));
3575 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3576 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3577 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3578 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3579 	ew32(TXDCTL(1), txdctl);
3580 
3581 	/*
3582 	 * ICH8 has opposite polarity of no_snoop bits.
3583 	 * By default, we should use snoop behavior.
3584 	 */
3585 	if (mac->type == e1000_ich8lan)
3586 		snoop = PCIE_ICH8_SNOOP_ALL;
3587 	else
3588 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3589 	e1000e_set_pcie_no_snoop(hw, snoop);
3590 
3591 	ctrl_ext = er32(CTRL_EXT);
3592 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3593 	ew32(CTRL_EXT, ctrl_ext);
3594 
3595 	/*
3596 	 * Clear all of the statistics registers (clear on read).  It is
3597 	 * important that we do this after we have tried to establish link
3598 	 * because the symbol error count will increment wildly if there
3599 	 * is no link.
3600 	 */
3601 	e1000_clear_hw_cntrs_ich8lan(hw);
3602 
3603 	return ret_val;
3604 }
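
/*
 * Editor's illustrative sketch (not part of the driver): the descriptor
 * write-back policy above is programmed identically for both Tx queues,
 * so the read-modify-write of TXDCTL could equally be written as a loop.
 * Behaviour is unchanged; this only shows the pattern once.
 */
static void __maybe_unused example_set_txdctl_wb_policy(struct e1000_hw *hw)
{
	u32 txdctl;
	int i;

	for (i = 0; i < 2; i++) {
		txdctl = er32(TXDCTL(i));
		txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
			 E1000_TXDCTL_FULL_TX_DESC_WB;
		txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
			 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
		ew32(TXDCTL(i), txdctl);
	}
}
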
3605 /**
3606  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3607  *  @hw: pointer to the HW structure
3608  *
3609  *  Sets/Clears the hardware bits necessary for correctly setting up the
3610  *  hardware for transmit and receive.
3611  **/
3612 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3613 {
3614 	u32 reg;
3615 
3616 	/* Extended Device Control */
3617 	reg = er32(CTRL_EXT);
3618 	reg |= (1 << 22);
3619 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3620 	if (hw->mac.type >= e1000_pchlan)
3621 		reg |= E1000_CTRL_EXT_PHYPDEN;
3622 	ew32(CTRL_EXT, reg);
3623 
3624 	/* Transmit Descriptor Control 0 */
3625 	reg = er32(TXDCTL(0));
3626 	reg |= (1 << 22);
3627 	ew32(TXDCTL(0), reg);
3628 
3629 	/* Transmit Descriptor Control 1 */
3630 	reg = er32(TXDCTL(1));
3631 	reg |= (1 << 22);
3632 	ew32(TXDCTL(1), reg);
3633 
3634 	/* Transmit Arbitration Control 0 */
3635 	reg = er32(TARC(0));
3636 	if (hw->mac.type == e1000_ich8lan)
3637 		reg |= (1 << 28) | (1 << 29);
3638 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3639 	ew32(TARC(0), reg);
3640 
3641 	/* Transmit Arbitration Control 1 */
3642 	reg = er32(TARC(1));
3643 	if (er32(TCTL) & E1000_TCTL_MULR)
3644 		reg &= ~(1 << 28);
3645 	else
3646 		reg |= (1 << 28);
3647 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
3648 	ew32(TARC(1), reg);
3649 
3650 	/* Device Status */
3651 	if (hw->mac.type == e1000_ich8lan) {
3652 		reg = er32(STATUS);
3653 		reg &= ~(1 << 31);
3654 		ew32(STATUS, reg);
3655 	}
3656 
3657 	/*
3658 	 * Work around a descriptor data corruption issue seen with NFS v2 UDP
3659 	 * traffic by simply disabling the NFS filtering capability.
3660 	 */
3661 	reg = er32(RFCTL);
3662 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3663 
3664 	/*
3665 	 * Disable IPv6 extension header parsing because some malformed
3666 	 * IPv6 headers can hang the Rx.
3667 	 */
3668 	if (hw->mac.type == e1000_ich8lan)
3669 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3670 	ew32(RFCTL, reg);
3671 }
3672 
3673 /**
3674  *  e1000_setup_link_ich8lan - Setup flow control and link settings
3675  *  @hw: pointer to the HW structure
3676  *
3677  *  Determines which flow control settings to use, then configures flow
3678  *  control.  Calls the appropriate media-specific link configuration
3679  *  function.  Assuming the adapter has a valid link partner, a valid link
3680  *  should be established.  Assumes the hardware has previously been reset
3681  *  and the transmitter and receiver are not enabled.
3682  **/
3683 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3684 {
3685 	s32 ret_val;
3686 
3687 	if (hw->phy.ops.check_reset_block(hw))
3688 		return 0;
3689 
3690 	/*
3691 	 * ICH parts do not have a word in the NVM to determine
3692 	 * the default flow control setting, so we explicitly
3693 	 * set it to full.
3694 	 */
3695 	if (hw->fc.requested_mode == e1000_fc_default) {
3696 		/* Workaround h/w hang when Tx flow control enabled */
3697 		if (hw->mac.type == e1000_pchlan)
3698 			hw->fc.requested_mode = e1000_fc_rx_pause;
3699 		else
3700 			hw->fc.requested_mode = e1000_fc_full;
3701 	}
3702 
3703 	/*
3704 	 * Save off the requested flow control mode for use later.  Depending
3705 	 * on the link partner's capabilities, we may or may not use this mode.
3706 	 */
3707 	hw->fc.current_mode = hw->fc.requested_mode;
3708 
3709 	e_dbg("After fix-ups FlowControl is now = %x\n",
3710 		hw->fc.current_mode);
3711 
3712 	/* Continue to configure the copper link. */
3713 	ret_val = hw->mac.ops.setup_physical_interface(hw);
3714 	if (ret_val)
3715 		return ret_val;
3716 
3717 	ew32(FCTTV, hw->fc.pause_time);
3718 	if ((hw->phy.type == e1000_phy_82578) ||
3719 	    (hw->phy.type == e1000_phy_82579) ||
3720 	    (hw->phy.type == e1000_phy_i217) ||
3721 	    (hw->phy.type == e1000_phy_82577)) {
3722 		ew32(FCRTV_PCH, hw->fc.refresh_time);
3723 
3724 		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
3725 				   hw->fc.pause_time);
3726 		if (ret_val)
3727 			return ret_val;
3728 	}
3729 
3730 	return e1000e_set_fc_watermarks(hw);
3731 }
3732 
3733 /**
3734  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3735  *  @hw: pointer to the HW structure
3736  *
3737  *  Configures the kumeran interface to the PHY to wait the appropriate time
3738  *  when polling the PHY, then calls the generic setup_copper_link to finish
3739  *  configuring the copper link.
3740  **/
3741 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3742 {
3743 	u32 ctrl;
3744 	s32 ret_val;
3745 	u16 reg_data;
3746 
3747 	ctrl = er32(CTRL);
3748 	ctrl |= E1000_CTRL_SLU;
3749 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3750 	ew32(CTRL, ctrl);
3751 
3752 	/*
3753 	 * Set the mac to wait the maximum time between each iteration
3754 	 * and increase the max iterations when polling the phy;
3755 	 * this fixes erroneous timeouts at 10Mbps.
3756 	 */
3757 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
3758 	if (ret_val)
3759 		return ret_val;
3760 	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3761 	                               &reg_data);
3762 	if (ret_val)
3763 		return ret_val;
3764 	reg_data |= 0x3F;
3765 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3766 	                                reg_data);
3767 	if (ret_val)
3768 		return ret_val;
3769 
3770 	switch (hw->phy.type) {
3771 	case e1000_phy_igp_3:
3772 		ret_val = e1000e_copper_link_setup_igp(hw);
3773 		if (ret_val)
3774 			return ret_val;
3775 		break;
3776 	case e1000_phy_bm:
3777 	case e1000_phy_82578:
3778 		ret_val = e1000e_copper_link_setup_m88(hw);
3779 		if (ret_val)
3780 			return ret_val;
3781 		break;
3782 	case e1000_phy_82577:
3783 	case e1000_phy_82579:
3784 	case e1000_phy_i217:
3785 		ret_val = e1000_copper_link_setup_82577(hw);
3786 		if (ret_val)
3787 			return ret_val;
3788 		break;
3789 	case e1000_phy_ife:
3790 		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
3791 		if (ret_val)
3792 			return ret_val;
3793 
3794 		reg_data &= ~IFE_PMC_AUTO_MDIX;
3795 
3796 		switch (hw->phy.mdix) {
3797 		case 1:	/* force MDI */
3798 			reg_data &= ~IFE_PMC_FORCE_MDIX;
3799 			break;
3800 		case 2:	/* force MDI-X */
3801 			reg_data |= IFE_PMC_FORCE_MDIX;
3802 			break;
3803 		case 0:	/* auto-MDIX */
3804 		default:
3805 			reg_data |= IFE_PMC_AUTO_MDIX;
3806 			break;
3807 		}
3808 		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
3809 		if (ret_val)
3810 			return ret_val;
3811 		break;
3812 	default:
3813 		break;
3814 	}
3815 
3816 	return e1000e_setup_copper_link(hw);
3817 }
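
/*
 * Editor's illustrative sketch (not part of the driver): for the IFE PHY
 * case above, MDI/MDI-X behaviour is chosen via hw->phy.mdix before link
 * setup (0 = auto-MDIX, 1 = force MDI, 2 = force MDI-X, as handled by the
 * switch statement).  The helper name is hypothetical.
 */
static s32 __maybe_unused example_force_mdix(struct e1000_hw *hw)
{
	/* Request forced MDI-X on the next copper link setup. */
	hw->phy.mdix = 2;

	return hw->mac.ops.setup_physical_interface(hw);
}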
3818 
3819 /**
3820  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3821  *  @hw: pointer to the HW structure
3822  *  @speed: pointer to store current link speed
3823  *  @duplex: pointer to store the current link duplex
3824  *
3825  *  Calls the generic get_speed_and_duplex to retrieve the current link
3826  *  information and then calls the Kumeran lock loss workaround for links at
3827  *  gigabit speeds.
3828  **/
3829 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3830 					  u16 *duplex)
3831 {
3832 	s32 ret_val;
3833 
3834 	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
3835 	if (ret_val)
3836 		return ret_val;
3837 
3838 	if ((hw->mac.type == e1000_ich8lan) &&
3839 	    (hw->phy.type == e1000_phy_igp_3) &&
3840 	    (*speed == SPEED_1000)) {
3841 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3842 	}
3843 
3844 	return ret_val;
3845 }
3846 
3847 /**
3848  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3849  *  @hw: pointer to the HW structure
3850  *
3851  *  Work-around for 82566 Kumeran PCS lock loss:
3852  *  On link status change (e.g. PCI reset, speed change), when link is up and
3853  *  the speed is gigabit:
3854  *    0) if the workaround has been disabled, do nothing
3855  *    1) wait 1ms for Kumeran link to come up
3856  *    2) check Kumeran Diagnostic register PCS lock loss bit
3857  *    3) if not set the link is locked (all is good), otherwise...
3858  *    4) reset the PHY
3859  *    5) repeat up to 10 times
3860  *  Note: this is only called for IGP3 copper when speed is 1gb.
3861  **/
3862 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3863 {
3864 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3865 	u32 phy_ctrl;
3866 	s32 ret_val;
3867 	u16 i, data;
3868 	bool link;
3869 
3870 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3871 		return 0;
3872 
3873 	/*
3874 	 * Make sure link is up before proceeding.  If not, just return.
3875 	 * Attempting this while the link is negotiating fouls up link
3876 	 * stability.
3877 	 */
3878 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
3879 	if (!link)
3880 		return 0;
3881 
3882 	for (i = 0; i < 10; i++) {
3883 		/* read once to clear */
3884 		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3885 		if (ret_val)
3886 			return ret_val;
3887 		/* and again to get new status */
3888 		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3889 		if (ret_val)
3890 			return ret_val;
3891 
3892 		/* check for PCS lock */
3893 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3894 			return 0;
3895 
3896 		/* Issue PHY reset */
3897 		e1000_phy_hw_reset(hw);
3898 		mdelay(5);
3899 	}
3900 	/* Disable GigE link negotiation */
3901 	phy_ctrl = er32(PHY_CTRL);
3902 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3903 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3904 	ew32(PHY_CTRL, phy_ctrl);
3905 
3906 	/*
3907 	 * Call gig speed drop workaround on Gig disable before accessing
3908 	 * any PHY registers
3909 	 */
3910 	e1000e_gig_downshift_workaround_ich8lan(hw);
3911 
3912 	/* unable to acquire PCS lock */
3913 	return -E1000_ERR_PHY;
3914 }
3915 
3916 /**
3917  *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3918  *  @hw: pointer to the HW structure
3919  *  @state: boolean value used to set the current Kumeran workaround state
3920  *
3921  *  If ICH8, set the current Kumeran workaround state (enabled = true,
3922  *  disabled = false).
3923  **/
3924 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3925 						 bool state)
3926 {
3927 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3928 
3929 	if (hw->mac.type != e1000_ich8lan) {
3930 		e_dbg("Workaround applies to ICH8 only.\n");
3931 		return;
3932 	}
3933 
3934 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3935 }
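
/*
 * Editor's illustrative sketch (not part of the driver): the lock-loss
 * workaround above is gated purely by the dev_spec flag, so external code
 * (e.g. diagnostics that issue their own PHY resets) can switch it off and
 * back on around a critical section.  The wrapper name is hypothetical.
 */
static void __maybe_unused example_run_without_kmrn_workaround(struct e1000_hw *hw)
{
	e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, false);
	/* ... PHY operations that must not trigger extra resets ... */
	e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
}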
3936 
3937 /**
3938  *  e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3939  *  @hw: pointer to the HW structure
3940  *
3941  *  Workaround for 82566 power-down on D3 entry:
3942  *    1) disable gigabit link
3943  *    2) write VR power-down enable
3944  *    3) read it back
3945  *  Continue if successful, else issue LCD reset and repeat
3946  **/
3947 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3948 {
3949 	u32 reg;
3950 	u16 data;
3951 	u8  retry = 0;
3952 
3953 	if (hw->phy.type != e1000_phy_igp_3)
3954 		return;
3955 
3956 	/* Try the workaround twice (if needed) */
3957 	do {
3958 		/* Disable link */
3959 		reg = er32(PHY_CTRL);
3960 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3961 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3962 		ew32(PHY_CTRL, reg);
3963 
3964 		/*
3965 		 * Call gig speed drop workaround on Gig disable before
3966 		 * accessing any PHY registers
3967 		 */
3968 		if (hw->mac.type == e1000_ich8lan)
3969 			e1000e_gig_downshift_workaround_ich8lan(hw);
3970 
3971 		/* Write VR power-down enable */
3972 		e1e_rphy(hw, IGP3_VR_CTRL, &data);
3973 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3974 		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3975 
3976 		/* Read it back and test */
3977 		e1e_rphy(hw, IGP3_VR_CTRL, &data);
3978 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3979 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3980 			break;
3981 
3982 		/* Issue PHY reset and repeat at most one more time */
3983 		reg = er32(CTRL);
3984 		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
3985 		retry++;
3986 	} while (retry);
3987 }
3988 
3989 /**
3990  *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3991  *  @hw: pointer to the HW structure
3992  *
3993  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3994  *  LPLU, Gig disable, MDIC PHY reset):
3995  *    1) Set Kumeran Near-end loopback
3996  *    2) Clear Kumeran Near-end loopback
3997  *  Should only be called for ICH8[m] devices with any 1G Phy.
3998  **/
3999 void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4000 {
4001 	s32 ret_val;
4002 	u16 reg_data;
4003 
4004 	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
4005 		return;
4006 
4007 	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4008 				      &reg_data);
4009 	if (ret_val)
4010 		return;
4011 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4012 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4013 				       reg_data);
4014 	if (ret_val)
4015 		return;
4016 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4017 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4018 				       reg_data);
4019 }
4020 
4021 /**
4022  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4023  *  @hw: pointer to the HW structure
4024  *
4025  *  During S0 to Sx transition, it is possible the link remains at gig
4026  *  instead of negotiating to a lower speed.  Before going to Sx, set
4027  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4028  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4029  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4030  *  needs to be written.
4031  *  Parts that support (and are linked to a partner which supports) EEE in
4032  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4033  *  than 10Mbps w/o EEE.
4034  **/
4035 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4036 {
4037 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4038 	u32 phy_ctrl;
4039 	s32 ret_val;
4040 
4041 	phy_ctrl = er32(PHY_CTRL);
4042 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4043 	if (hw->phy.type == e1000_phy_i217) {
4044 		u16 phy_reg;
4045 
4046 		ret_val = hw->phy.ops.acquire(hw);
4047 		if (ret_val)
4048 			goto out;
4049 
4050 		if (!dev_spec->eee_disable) {
4051 			u16 eee_advert;
4052 
4053 			ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
4054 						  I217_EEE_ADVERTISEMENT);
4055 			if (ret_val)
4056 				goto release;
4057 			e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
4058 
4059 			/*
4060 			 * Disable LPLU if both link partners support 100BaseT
4061 			 * EEE and 100Full is advertised on both ends of the
4062 			 * link.
4063 			 */
4064 			if ((eee_advert & I217_EEE_100_SUPPORTED) &&
4065 			    (dev_spec->eee_lp_ability &
4066 			     I217_EEE_100_SUPPORTED) &&
4067 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4068 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4069 					      E1000_PHY_CTRL_NOND0A_LPLU);
4070 		}
4071 
4072 		/*
4073 		 * For i217 Intel Rapid Start Technology support,
4074 		 * when the system is going into Sx and no manageability engine
4075 		 * is present, the driver must configure proxy to reset only on
4076 		 * power good.  LPI (Low Power Idle) state must also reset only
4077 		 * on power good, as well as the MTA (Multicast table array).
4078 		 * The SMBus release must also be disabled on LCD reset.
4079 		 */
4080 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4081 
4082 			/* Enable proxy to reset only on power good. */
4083 			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
4084 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4085 			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
4086 
4087 			/*
4088 			 * Set the bit that enables LPI (EEE) to reset only
4089 			 * on power good.
4090 			 */
4091 			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
4092 			phy_reg |= I217_SxCTRL_MASK;
4093 			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
4094 
4095 			/* Disable the SMB release on LCD reset. */
4096 			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4097 			phy_reg &= ~I217_MEMPWR;
4098 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4099 		}
4100 
4101 		/*
4102 		 * Enable MTA to reset for Intel Rapid Start Technology
4103 		 * Support
4104 		 */
4105 		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4106 		phy_reg |= I217_CGFREG_MASK;
4107 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4108 
4109 release:
4110 		hw->phy.ops.release(hw);
4111 	}
4112 out:
4113 	ew32(PHY_CTRL, phy_ctrl);
4114 
4115 	if (hw->mac.type == e1000_ich8lan)
4116 		e1000e_gig_downshift_workaround_ich8lan(hw);
4117 
4118 	if (hw->mac.type >= e1000_pchlan) {
4119 		e1000_oem_bits_config_ich8lan(hw, false);
4120 
4121 		/* Reset PHY to activate OEM bits on 82577/8 */
4122 		if (hw->mac.type == e1000_pchlan)
4123 			e1000e_phy_hw_reset_generic(hw);
4124 
4125 		ret_val = hw->phy.ops.acquire(hw);
4126 		if (ret_val)
4127 			return;
4128 		e1000_write_smbus_addr(hw);
4129 		hw->phy.ops.release(hw);
4130 	}
4131 }
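
/*
 * Editor's illustrative sketch (not part of the driver): the EEE
 * advertisement read above goes through the 82579/I217 EMI window - the
 * EMI address register is written first, then the EMI data register is
 * read.  The helper name is hypothetical and assumes the PHY semaphore is
 * already held (hence the _locked accessors).
 */
static s32 __maybe_unused example_read_emi_locked(struct e1000_hw *hw,
						  u16 addr, u16 *data)
{
	s32 ret_val;

	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, addr);
	if (ret_val)
		return ret_val;

	return e1e_rphy_locked(hw, I82579_EMI_DATA, data);
}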
4132 
4133 /**
4134  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4135  *  @hw: pointer to the HW structure
4136  *
4137  *  During Sx to S0 transitions on non-managed devices or managed devices
4138  *  on which PHY resets are not blocked, if the PHY registers cannot be
4139  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4140  *  the PHY.
4141  *  On i217, setup Intel Rapid Start Technology.
4142  **/
4143 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4144 {
4145 	s32 ret_val;
4146 
4147 	if (hw->mac.type < e1000_pch2lan)
4148 		return;
4149 
4150 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
4151 	if (ret_val) {
4152 		e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
4153 		return;
4154 	}
4155 
4156 	/*
4157 	 * For i217 Intel Rapid Start Technology support, when the system
4158 	 * is transitioning from Sx and no manageability engine is present,
4159 	 * configure SMBus to restore on reset, disable proxy, and enable
4160 	 * the reset on MTA (Multicast table array).
4161 	 */
4162 	if (hw->phy.type == e1000_phy_i217) {
4163 		u16 phy_reg;
4164 
4165 		ret_val = hw->phy.ops.acquire(hw);
4166 		if (ret_val) {
4167 			e_dbg("Failed to setup iRST\n");
4168 			return;
4169 		}
4170 
4171 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4172 			/*
4173 			 * Restore clear on SMB if no manageability engine
4174 			 * is present
4175 			 */
4176 			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4177 			if (ret_val)
4178 				goto release;
4179 			phy_reg |= I217_MEMPWR_MASK;
4180 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4181 
4182 			/* Disable Proxy */
4183 			e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
4184 		}
4185 		/* Enable reset on MTA */
4186 		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4187 		if (ret_val)
4188 			goto release;
4189 		phy_reg &= ~I217_CGFREG_MASK;
4190 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4191 release:
4192 		if (ret_val)
4193 			e_dbg("Error %d in resume workarounds\n", ret_val);
4194 		hw->phy.ops.release(hw);
4195 	}
4196 }
4197 
4198 /**
4199  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4200  *  @hw: pointer to the HW structure
4201  *
4202  *  Return the LED to the default configuration.
4203  **/
4204 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4205 {
4206 	if (hw->phy.type == e1000_phy_ife)
4207 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
4208 
4209 	ew32(LEDCTL, hw->mac.ledctl_default);
4210 	return 0;
4211 }
4212 
4213 /**
4214  *  e1000_led_on_ich8lan - Turn LEDs on
4215  *  @hw: pointer to the HW structure
4216  *
4217  *  Turn on the LEDs.
4218  **/
4219 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4220 {
4221 	if (hw->phy.type == e1000_phy_ife)
4222 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4223 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4224 
4225 	ew32(LEDCTL, hw->mac.ledctl_mode2);
4226 	return 0;
4227 }
4228 
4229 /**
4230  *  e1000_led_off_ich8lan - Turn LEDs off
4231  *  @hw: pointer to the HW structure
4232  *
4233  *  Turn off the LEDs.
4234  **/
4235 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4236 {
4237 	if (hw->phy.type == e1000_phy_ife)
4238 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4239 				(IFE_PSCL_PROBE_MODE |
4240 				 IFE_PSCL_PROBE_LEDS_OFF));
4241 
4242 	ew32(LEDCTL, hw->mac.ledctl_mode1);
4243 	return 0;
4244 }
4245 
4246 /**
4247  *  e1000_setup_led_pchlan - Configures SW controllable LED
4248  *  @hw: pointer to the HW structure
4249  *
4250  *  This prepares the SW controllable LED for use.
4251  **/
4252 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4253 {
4254 	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
4255 }
4256 
4257 /**
4258  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4259  *  @hw: pointer to the HW structure
4260  *
4261  *  Return the LED to the default configuration.
4262  **/
4263 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4264 {
4265 	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
4266 }
4267 
4268 /**
4269  *  e1000_led_on_pchlan - Turn LEDs on
4270  *  @hw: pointer to the HW structure
4271  *
4272  *  Turn on the LEDs.
4273  **/
4274 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4275 {
4276 	u16 data = (u16)hw->mac.ledctl_mode2;
4277 	u32 i, led;
4278 
4279 	/*
4280 	 * If no link, then turn LED on by setting the invert bit
4281 	 * for each LED whose mode is "link_up" in ledctl_mode2.
4282 	 */
4283 	if (!(er32(STATUS) & E1000_STATUS_LU)) {
4284 		for (i = 0; i < 3; i++) {
4285 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4286 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4287 			    E1000_LEDCTL_MODE_LINK_UP)
4288 				continue;
4289 			if (led & E1000_PHY_LED0_IVRT)
4290 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4291 			else
4292 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4293 		}
4294 	}
4295 
4296 	return e1e_wphy(hw, HV_LED_CONFIG, data);
4297 }
4298 
4299 /**
4300  *  e1000_led_off_pchlan - Turn LEDs off
4301  *  @hw: pointer to the HW structure
4302  *
4303  *  Turn off the LEDs.
4304  **/
4305 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4306 {
4307 	u16 data = (u16)hw->mac.ledctl_mode1;
4308 	u32 i, led;
4309 
4310 	/*
4311 	 * If no link, then turn LED off by clearing the invert bit
4312 	 * for each LED whose mode is "link_up" in ledctl_mode1.
4313 	 */
4314 	if (!(er32(STATUS) & E1000_STATUS_LU)) {
4315 		for (i = 0; i < 3; i++) {
4316 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4317 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4318 			    E1000_LEDCTL_MODE_LINK_UP)
4319 				continue;
4320 			if (led & E1000_PHY_LED0_IVRT)
4321 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4322 			else
4323 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4324 		}
4325 	}
4326 
4327 	return e1e_wphy(hw, HV_LED_CONFIG, data);
4328 }
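
/*
 * Editor's illustrative sketch (not part of the driver): HV_LED_CONFIG
 * packs one 5-bit field per LED (mode bits plus the invert bit), so LED i
 * occupies bits [i * 5 + 4 : i * 5].  The hypothetical helper below
 * extracts one field exactly the way the two loops above do.
 */
static u16 __maybe_unused example_get_led_field(u16 ledctl, unsigned int i)
{
	return (ledctl >> (i * 5)) & E1000_PHY_LED0_MASK;
}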
4329 
4330 /**
4331  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4332  *  @hw: pointer to the HW structure
4333  *
4334  *  Read appropriate register for the config done bit for completion status
4335  *  and configure the PHY through s/w for EEPROM-less parts.
4336  *
4337  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
4338  *  config done bit, so only an error is logged and initialization
4339  *  continues.  If we were to return with an error, EEPROM-less silicon
4340  *  could not be reset or change link.
4341  **/
4342 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4343 {
4344 	s32 ret_val = 0;
4345 	u32 bank = 0;
4346 	u32 status;
4347 
4348 	e1000e_get_cfg_done(hw);
4349 
4350 	/* Wait for indication from h/w that it has completed basic config */
4351 	if (hw->mac.type >= e1000_ich10lan) {
4352 		e1000_lan_init_done_ich8lan(hw);
4353 	} else {
4354 		ret_val = e1000e_get_auto_rd_done(hw);
4355 		if (ret_val) {
4356 			/*
4357 			 * When auto config read does not complete, do not
4358 			 * return with an error.  This can happen when there is
4359 			 * no eeprom and an error here would prevent getting link.
4360 			 */
4361 			e_dbg("Auto Read Done did not complete\n");
4362 			ret_val = 0;
4363 		}
4364 	}
4365 
4366 	/* Clear PHY Reset Asserted bit */
4367 	status = er32(STATUS);
4368 	if (status & E1000_STATUS_PHYRA)
4369 		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
4370 	else
4371 		e_dbg("PHY Reset Asserted not set - needs delay\n");
4372 
4373 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4374 	if (hw->mac.type <= e1000_ich9lan) {
4375 		if (!(er32(EECD) & E1000_EECD_PRES) &&
4376 		    (hw->phy.type == e1000_phy_igp_3)) {
4377 			e1000e_phy_init_script_igp3(hw);
4378 		}
4379 	} else {
4380 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4381 			/* Maybe we should do a basic PHY config */
4382 			e_dbg("EEPROM not present\n");
4383 			ret_val = -E1000_ERR_CONFIG;
4384 		}
4385 	}
4386 
4387 	return ret_val;
4388 }
4389 
4390 /**
4391  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4392  * @hw: pointer to the HW structure
4393  *
4394  * In the case of a PHY power down to save power, to turn off link during a
4395  * driver unload, or when wake on LAN is not enabled, remove the link.
4396  **/
4397 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4398 {
4399 	/* If the management interface is not enabled, then power down */
4400 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4401 	      hw->phy.ops.check_reset_block(hw)))
4402 		e1000_power_down_phy_copper(hw);
4403 }
4404 
4405 /**
4406  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4407  *  @hw: pointer to the HW structure
4408  *
4409  *  Clears hardware counters specific to the silicon family and calls
4410  *  clear_hw_cntrs_generic to clear all general purpose counters.
4411  **/
4412 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4413 {
4414 	u16 phy_data;
4415 	s32 ret_val;
4416 
4417 	e1000e_clear_hw_cntrs_base(hw);
4418 
4419 	er32(ALGNERRC);
4420 	er32(RXERRC);
4421 	er32(TNCRS);
4422 	er32(CEXTERR);
4423 	er32(TSCTC);
4424 	er32(TSCTFC);
4425 
4426 	er32(MGTPRC);
4427 	er32(MGTPDC);
4428 	er32(MGTPTC);
4429 
4430 	er32(IAC);
4431 	er32(ICRXOC);
4432 
4433 	/* Clear PHY statistics registers */
4434 	if ((hw->phy.type == e1000_phy_82578) ||
4435 	    (hw->phy.type == e1000_phy_82579) ||
4436 	    (hw->phy.type == e1000_phy_i217) ||
4437 	    (hw->phy.type == e1000_phy_82577)) {
4438 		ret_val = hw->phy.ops.acquire(hw);
4439 		if (ret_val)
4440 			return;
4441 		ret_val = hw->phy.ops.set_page(hw,
4442 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
4443 		if (ret_val)
4444 			goto release;
4445 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4446 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4447 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4448 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4449 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4450 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4451 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4452 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4453 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4454 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4455 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4456 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4457 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4458 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4459 release:
4460 		hw->phy.ops.release(hw);
4461 	}
4462 }
4463 
4464 static const struct e1000_mac_operations ich8_mac_ops = {
4465 	/* check_mng_mode dependent on mac type */
4466 	.check_for_link		= e1000_check_for_copper_link_ich8lan,
4467 	/* cleanup_led dependent on mac type */
4468 	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
4469 	.get_bus_info		= e1000_get_bus_info_ich8lan,
4470 	.set_lan_id		= e1000_set_lan_id_single_port,
4471 	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
4472 	/* led_on dependent on mac type */
4473 	/* led_off dependent on mac type */
4474 	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
4475 	.reset_hw		= e1000_reset_hw_ich8lan,
4476 	.init_hw		= e1000_init_hw_ich8lan,
4477 	.setup_link		= e1000_setup_link_ich8lan,
4478 	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
4479 	/* id_led_init dependent on mac type */
4480 	.config_collision_dist	= e1000e_config_collision_dist_generic,
4481 	.rar_set		= e1000e_rar_set_generic,
4482 };
4483 
4484 static const struct e1000_phy_operations ich8_phy_ops = {
4485 	.acquire		= e1000_acquire_swflag_ich8lan,
4486 	.check_reset_block	= e1000_check_reset_block_ich8lan,
4487 	.commit			= NULL,
4488 	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
4489 	.get_cable_length	= e1000e_get_cable_length_igp_2,
4490 	.read_reg		= e1000e_read_phy_reg_igp,
4491 	.release		= e1000_release_swflag_ich8lan,
4492 	.reset			= e1000_phy_hw_reset_ich8lan,
4493 	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
4494 	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
4495 	.write_reg		= e1000e_write_phy_reg_igp,
4496 };
4497 
4498 static const struct e1000_nvm_operations ich8_nvm_ops = {
4499 	.acquire		= e1000_acquire_nvm_ich8lan,
4500 	.read			= e1000_read_nvm_ich8lan,
4501 	.release		= e1000_release_nvm_ich8lan,
4502 	.reload			= e1000e_reload_nvm_generic,
4503 	.update			= e1000_update_nvm_checksum_ich8lan,
4504 	.valid_led_default	= e1000_valid_led_default_ich8lan,
4505 	.validate		= e1000_validate_nvm_checksum_ich8lan,
4506 	.write			= e1000_write_nvm_ich8lan,
4507 };
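
/*
 * Editor's illustrative sketch (not part of the driver): core e1000e code
 * reaches the ICH8 NVM routines through this ops table (for example the
 * e1000_read_nvm() call in e1000_reset_hw_ich8lan() above), so a single
 * word read looks roughly like this.  The wrapper name is hypothetical.
 */
static s32 __maybe_unused example_read_nvm_word(struct e1000_hw *hw,
						u16 offset, u16 *word)
{
	return hw->nvm.ops.read(hw, offset, 1, word);
}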
4508 
4509 const struct e1000_info e1000_ich8_info = {
4510 	.mac			= e1000_ich8lan,
4511 	.flags			= FLAG_HAS_WOL
4512 				  | FLAG_IS_ICH
4513 				  | FLAG_HAS_CTRLEXT_ON_LOAD
4514 				  | FLAG_HAS_AMT
4515 				  | FLAG_HAS_FLASH
4516 				  | FLAG_APME_IN_WUC,
4517 	.pba			= 8,
4518 	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
4519 	.get_variants		= e1000_get_variants_ich8lan,
4520 	.mac_ops		= &ich8_mac_ops,
4521 	.phy_ops		= &ich8_phy_ops,
4522 	.nvm_ops		= &ich8_nvm_ops,
4523 };
4524 
4525 const struct e1000_info e1000_ich9_info = {
4526 	.mac			= e1000_ich9lan,
4527 	.flags			= FLAG_HAS_JUMBO_FRAMES
4528 				  | FLAG_IS_ICH
4529 				  | FLAG_HAS_WOL
4530 				  | FLAG_HAS_CTRLEXT_ON_LOAD
4531 				  | FLAG_HAS_AMT
4532 				  | FLAG_HAS_FLASH
4533 				  | FLAG_APME_IN_WUC,
4534 	.pba			= 18,
4535 	.max_hw_frame_size	= DEFAULT_JUMBO,
4536 	.get_variants		= e1000_get_variants_ich8lan,
4537 	.mac_ops		= &ich8_mac_ops,
4538 	.phy_ops		= &ich8_phy_ops,
4539 	.nvm_ops		= &ich8_nvm_ops,
4540 };
4541 
4542 const struct e1000_info e1000_ich10_info = {
4543 	.mac			= e1000_ich10lan,
4544 	.flags			= FLAG_HAS_JUMBO_FRAMES
4545 				  | FLAG_IS_ICH
4546 				  | FLAG_HAS_WOL
4547 				  | FLAG_HAS_CTRLEXT_ON_LOAD
4548 				  | FLAG_HAS_AMT
4549 				  | FLAG_HAS_FLASH
4550 				  | FLAG_APME_IN_WUC,
4551 	.pba			= 18,
4552 	.max_hw_frame_size	= DEFAULT_JUMBO,
4553 	.get_variants		= e1000_get_variants_ich8lan,
4554 	.mac_ops		= &ich8_mac_ops,
4555 	.phy_ops		= &ich8_phy_ops,
4556 	.nvm_ops		= &ich8_nvm_ops,
4557 };
4558 
4559 const struct e1000_info e1000_pch_info = {
4560 	.mac			= e1000_pchlan,
4561 	.flags			= FLAG_IS_ICH
4562 				  | FLAG_HAS_WOL
4563 				  | FLAG_HAS_CTRLEXT_ON_LOAD
4564 				  | FLAG_HAS_AMT
4565 				  | FLAG_HAS_FLASH
4566 				  | FLAG_HAS_JUMBO_FRAMES
4567 				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
4568 				  | FLAG_APME_IN_WUC,
4569 	.flags2			= FLAG2_HAS_PHY_STATS,
4570 	.pba			= 26,
4571 	.max_hw_frame_size	= 4096,
4572 	.get_variants		= e1000_get_variants_ich8lan,
4573 	.mac_ops		= &ich8_mac_ops,
4574 	.phy_ops		= &ich8_phy_ops,
4575 	.nvm_ops		= &ich8_nvm_ops,
4576 };
4577 
4578 const struct e1000_info e1000_pch2_info = {
4579 	.mac			= e1000_pch2lan,
4580 	.flags			= FLAG_IS_ICH
4581 				  | FLAG_HAS_WOL
4582 				  | FLAG_HAS_CTRLEXT_ON_LOAD
4583 				  | FLAG_HAS_AMT
4584 				  | FLAG_HAS_FLASH
4585 				  | FLAG_HAS_JUMBO_FRAMES
4586 				  | FLAG_APME_IN_WUC,
4587 	.flags2			= FLAG2_HAS_PHY_STATS
4588 				  | FLAG2_HAS_EEE,
4589 	.pba			= 26,
4590 	.max_hw_frame_size	= DEFAULT_JUMBO,
4591 	.get_variants		= e1000_get_variants_ich8lan,
4592 	.mac_ops		= &ich8_mac_ops,
4593 	.phy_ops		= &ich8_phy_ops,
4594 	.nvm_ops		= &ich8_nvm_ops,
4595 };
4596 
4597 const struct e1000_info e1000_pch_lpt_info = {
4598 	.mac			= e1000_pch_lpt,
4599 	.flags			= FLAG_IS_ICH
4600 				  | FLAG_HAS_WOL
4601 				  | FLAG_HAS_CTRLEXT_ON_LOAD
4602 				  | FLAG_HAS_AMT
4603 				  | FLAG_HAS_FLASH
4604 				  | FLAG_HAS_JUMBO_FRAMES
4605 				  | FLAG_APME_IN_WUC,
4606 	.flags2			= FLAG2_HAS_PHY_STATS
4607 				  | FLAG2_HAS_EEE,
4608 	.pba			= 26,
4609 	.max_hw_frame_size	= DEFAULT_JUMBO,
4610 	.get_variants		= e1000_get_variants_ich8lan,
4611 	.mac_ops		= &ich8_mac_ops,
4612 	.phy_ops		= &ich8_phy_ops,
4613 	.nvm_ops		= &ich8_nvm_ops,
4614 };
4615