/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000_api.h"

static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					   bool active);
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					   bool active);
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				  u16 words, u16 *data);
static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data);
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					   u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					  u16 *speed, u16 *duplex);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					 u32 offset, u8 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data);
static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					   u32 *data);
static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
					  u32 offset, u32 *data);
static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
					    u32 offset, u32 data);
static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
						 u32 offset, u32 dword);
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					 u32 offset, u16 *data);
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1;		/* bit 0 Flash Cycle Done */
		u16 flcerr:1;		/* bit 1 Flash Cycle Error */
		u16 dael:1;		/* bit 2 Direct Access error Log */
		u16 berasesz:2;		/* bit 4:3 Sector Erase Size */
		u16 flcinprog:1;	/* bit 5 flash cycle in Progress */
		u16 reserved1:2;	/* bit 7:6 Reserved */
		u16 reserved2:6;	/* bit 13:8 Reserved */
		u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
		u16 flockdn:1;		/* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};
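
/* Illustrative usage (a sketch of how the flash helpers later in this file
 * use the union): read the 16-bit HSFSTS register into .regval, then
 * inspect individual fields through .hsf_status, e.g.
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (!hsfsts.hsf_status.flcinprog) {
 *		... no flash cycle in progress, safe to start one ...
 *	}
 */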

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;		/* 0 Flash Cycle Go */
		u16 flcycle:2;		/* 2:1 Flash Cycle */
		u16 reserved:5;		/* 7:3 Reserved */
		u16 fldbcount:2;	/* 9:8 Flash Data Byte Count */
		u16 flockdn:6;		/* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};
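
/* Illustrative sketch of programming a flash cycle through this union (the
 * read/write helpers later in this file follow this pattern): set the cycle
 * type and byte count, then write .regval back; the FLCGO bit is set
 * separately when the cycle is actually started:
 *
 *	union ich8_hws_flash_ctrl hsflctl;
 *
 *	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
 *	hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
 *	hsflctl.hsf_ctrl.fldbcount = size - 1;
 *	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 */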

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8;	/* 0:7 GbE region Read Access */
		u32 grwa:8;	/* 8:15 GbE region Write Access */
		u32 gmrag:8;	/* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8;	/* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u16 regval;
};

/**
 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 * @hw: pointer to the HW structure
 *
 * Test access to the PHY registers by reading the PHY ID registers.  If
 * the PHY ID is already known (e.g. resume path) compare it with known ID,
 * otherwise assume the read PHY ID is correct if it is valid.
 *
 * Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return FALSE;
out:
	if ((hw->mac.type == e1000_pch_lpt) ||
	    (hw->mac.type == e1000_pch_spt)) {
		/* Only unforce SMBus if ME is not active */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		}
	}

	return TRUE;
}

/**
 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 * @hw: pointer to the HW structure
 *
 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 * used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	usec_delay(10);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		msec_delay(50);
	} else {
		u16 count = 20;

		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}

/**
 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 * @hw: pointer to the HW structure
 *
 * Workarounds/flow necessary for PHY initialization during driver load
 * and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, TRUE);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {
		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
	}

	return ret_val;
}

/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg = e1000_read_phy_reg_igp;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg = e1000_write_phy_reg_igp;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function
 * pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type == e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		nvm_size =
		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
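		/* Illustrative (an NVM_SIZE_MULTIPLIER of 4096 is assumed
		 * here): a 5-bit STRAP field of 0x0F would yield
		 * (0x0F + 1) * 4096 = 64 KB of NVM, split just below into
		 * two equal banks.
		 */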
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			DEBUGOUT("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
				       << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
	nvm->ops.release = e1000_release_nvm_ich8lan;
	if (hw->mac.type == e1000_pch_spt) {
		nvm->ops.read = e1000_read_nvm_spt;
		nvm->ops.update = e1000_update_nvm_checksum_spt;
	} else {
		nvm->ops.read = e1000_read_nvm_ich8lan;
		nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	}
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write = e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}

/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific MAC parameters and function
 * pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
	case e1000_pch_spt:
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
		/* fall-through */
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if ((mac->type == e1000_pch_lpt) ||
	    (mac->type == e1000_pch_spt)) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
			e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

	return E1000_SUCCESS;
}

/**
 * __e1000_access_emi_reg_locked - Read/write EMI register
 * @hw: pointer to the HW structure
 * @address: EMI address to program
 * @data: pointer to value to read/write from/to the EMI address
 * @read: boolean flag to indicate read or write
 *
 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_emi_reg_locked");

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
						      data);
	else
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       *data);

	return ret_val;
}

/**
 * e1000_read_emi_reg_locked - Read Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be read from the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	DEBUGFUNC("e1000_read_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
}

/**
 * e1000_write_emi_reg_locked - Write Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be written to the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	DEBUGFUNC("e1000_write_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
}
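
/* Illustrative caller pattern (a sketch; e1000_set_eee_pchlan below does
 * essentially this): the _locked EMI accessors require the SW/FW/HW
 * semaphore, so bracket them with acquire/release:
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *	hw->phy.ops.release(hw);
 */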

/**
 * e1000_set_eee_pchlan - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
 * the link and the EEE capabilities of the link partner.  The LPI Control
 * register bits will remain set only if/when link is up.
 *
 * EEE LPI must not be asserted earlier than one second after link is up.
 * On 82579, EEE LPI should not be enabled until that second has elapsed;
 * otherwise there can be link issues with some switches.  Other devices can
 * have EEE LPI enabled immediately upon link up since they have a timer in
 * hardware which prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	DEBUGFUNC("e1000_set_eee_pchlan");

	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return E1000_SUCCESS;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests.  Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 * speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		if ((hw->phy.revision > 5) || !link ||
		    ((status & E1000_STATUS_SPEED_100) &&
		     (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

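/**
 * e1000_ltr2ns - Convert an encoded LTR value to nanoseconds
 * @ltr: encoded LTR latency (10-bit value plus 3-bit scale)
 *
 * Decode per the encoding used by e1000_platform_pm_pch_lpt():
 * latency = value * 2^(5 * scale) ns.  For example, a value of 100 with a
 * scale of 2 decodes to 100 * 2^10 = 102400 ns.  (This comment was added
 * for clarity; the decoding itself is defined by the masks/shifts below.)
 **/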
static u64 e1000_ltr2ns(u16 ltr)
{
	u32 value, scale;

	/* Determine the latency in nsec based on the LTR value & scale */
	value = ltr & E1000_LTRV_VALUE_MASK;
	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

	return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
}

/**
 * e1000_platform_pm_pch_lpt - Set platform power management values
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 * when link is up (which must not exceed the maximum latency supported
 * by the platform), otherwise specify there is no LTR requirement.
 * Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
 * latencies in the LTR Extended Capability Structure in the PCIe Extended
 * Capability register set, on this device LTR is set by writing the
 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 * message to the PMC.
 *
 * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 * high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
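
		/* Worked example (illustrative values): with rxa = 26 KB,
		 * max_frame_size = 1522 and speed = 1000 Mb/s,
		 * lat_ns = (26 * 1024 - 2 * 1522) * 8 * 1000 / 1000
		 *        = 188640 ns.  That exceeds the 10-bit value field,
		 * so the loop above scales it up twice by 2^5 (rounding up)
		 * to value = 185, scale = 2, i.e. lat_enc = (2 << 10) | 185,
		 * which decodes back to 185 * 2^10 = 189440 ns.
		 */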

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode.  This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}

/**
 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 * @hw: pointer to the HW structure
 * @itr: interrupt throttling rate
 *
 * Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
	u32 svcr;
	s32 timer;

	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;

	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;
	}

	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
}

/**
 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @to_sx: boolean indicating a system power state transition to Sx
 *
 * When link is down, configure ULP mode to significantly reduce the power
 * to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 * ME firmware to start the ULP configuration.  If not on an ME enabled
 * system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;
	u16 oem_reg = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			  (E1000_READ_REG(hw, E1000_FEXT) &
			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			  i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
	 * LPLU and disable Gig speed when entering ULP
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
						       &oem_reg);
		if (ret_val)
			goto release;

		phy_reg = oem_reg;
		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							phy_reg);
		if (ret_val)
			goto release;
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
		else
			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							oem_reg);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @force: boolean indicating whether or not to force disabling ULP
 *
 * Un-configure ULP mode when link is up, the system is transitioned from
 * Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 * system, poll for an indication from ME that ULP has been un-configured.
 * If not on an ME enabled system, un-configure the ULP mode by software.
 *
 * During nominal operation, this function is called when link is acquired
 * to disable ULP mode (force=FALSE); otherwise, for example when unloading
 * the driver or during Sx->S0 transitions, this is called with force=TRUE
 * to forcibly disable ULP.
 */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 30) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}

/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1544 {
1545 struct e1000_mac_info *mac = &hw->mac;
1546 s32 ret_val, tipg_reg = 0;
1547 u16 emi_addr, emi_val = 0;
1548 bool link;
1549 u16 phy_reg;
1550
1551 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1552
1553 /* We only want to go out to the PHY registers to see if Auto-Neg
1554 * has completed and/or if our link status has changed. The
1555 * get_link_status flag is set upon receiving a Link Status
1556 * Change or Rx Sequence Error interrupt.
1557 */
1558 if (!mac->get_link_status)
1559 return E1000_SUCCESS;
1560
1561 /* First we want to see if the MII Status Register reports
1562 * link. If so, then we want to get the current speed/duplex
1563 * of the PHY.
1564 */
1565 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1566 if (ret_val)
1567 return ret_val;
1568
1569 if (hw->mac.type == e1000_pchlan) {
1570 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1571 if (ret_val)
1572 return ret_val;
1573 }
1574
1575 /* When connected at 10Mbps half-duplex, some parts are excessively
1576 * aggressive resulting in many collisions. To avoid this, increase
1577 * the IPG and reduce Rx latency in the PHY.
1578 */
1579 if (((hw->mac.type == e1000_pch2lan) ||
1580 (hw->mac.type == e1000_pch_lpt) ||
1581 (hw->mac.type == e1000_pch_spt)) && link) {
1582 u16 speed, duplex;
1583
1584 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1585 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1586 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1587
1588 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1589 tipg_reg |= 0xFF;
1590 /* Reduce Rx latency in analog PHY */
1591 emi_val = 0;
1592 } else if (hw->mac.type == e1000_pch_spt &&
1593 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1594 tipg_reg |= 0xC;
1595 emi_val = 1;
1596 } else {
1597 /* Roll back the default values */
1598 tipg_reg |= 0x08;
1599 emi_val = 1;
1600 }
1601
1602 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1603
1604 ret_val = hw->phy.ops.acquire(hw);
1605 if (ret_val)
1606 return ret_val;
1607
1608 if (hw->mac.type == e1000_pch2lan)
1609 emi_addr = I82579_RX_CONFIG;
1610 else
1611 emi_addr = I217_RX_CONFIG;
1612 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1613
1614 if (hw->mac.type == e1000_pch_lpt ||
1615 hw->mac.type == e1000_pch_spt) {
1616 u16 phy_reg;
1617
1618 hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1619 &phy_reg);
1620 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1621 if (speed == SPEED_100 || speed == SPEED_10)
1622 phy_reg |= 0x3E8;
1623 else
1624 phy_reg |= 0xFA;
1625 hw->phy.ops.write_reg_locked(hw,
1626 I217_PLL_CLOCK_GATE_REG,
1627 phy_reg);
1628 }
1629 hw->phy.ops.release(hw);
1630
1631 if (ret_val)
1632 return ret_val;
1633
1634 if (hw->mac.type == e1000_pch_spt) {
1635 u16 data;
1636 u16 ptr_gap;
1637
1638 if (speed == SPEED_1000) {
1639 ret_val = hw->phy.ops.acquire(hw);
1640 if (ret_val)
1641 return ret_val;
1642
1643 ret_val = hw->phy.ops.read_reg_locked(hw,
1644 PHY_REG(776, 20),
1645 &data);
1646 if (ret_val) {
1647 hw->phy.ops.release(hw);
1648 return ret_val;
1649 }
1650
1651 ptr_gap = (data & (0x3FF << 2)) >> 2;
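/* Bits 11:2 of PHY_REG(776, 20) hold the Tx pointer gap (the same field
 * the jumbo-frame workaround programs with E1000_TX_PTR_GAP); enforce a
 * floor of 0x18 at gigabit speed.
 */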
1652 if (ptr_gap < 0x18) {
1653 data &= ~(0x3FF << 2);
1654 data |= (0x18 << 2);
1655 ret_val =
1656 hw->phy.ops.write_reg_locked(hw,
1657 PHY_REG(776, 20), data);
1658 }
1659 hw->phy.ops.release(hw);
1660 if (ret_val)
1661 return ret_val;
1662 } else {
1663 ret_val = hw->phy.ops.acquire(hw);
1664 if (ret_val)
1665 return ret_val;
1666
1667 ret_val = hw->phy.ops.write_reg_locked(hw,
1668 PHY_REG(776, 20),
1669 0xC023);
1670 hw->phy.ops.release(hw);
1671 if (ret_val)
1672 return ret_val;
1673
1674 }
1675 }
1676 }
1677
1678 /* I217 Packet Loss issue:
1679 * ensure that FEXTNVM4 Beacon Duration is set correctly
1680 * on power up.
1681 * Set the Beacon Duration for I217 to 8 usec
1682 */
1683 if ((hw->mac.type == e1000_pch_lpt) ||
1684 (hw->mac.type == e1000_pch_spt)) {
1685 u32 mac_reg;
1686
1687 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1688 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1689 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1690 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1691 }
1692
1693 /* Work-around I218 hang issue */
1694 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1695 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1696 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1697 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1698 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1699 if (ret_val)
1700 return ret_val;
1701 }
1702 if ((hw->mac.type == e1000_pch_lpt) ||
1703 (hw->mac.type == e1000_pch_spt)) {
1704 /* Set platform power management values for
1705 * Latency Tolerance Reporting (LTR)
1706 * Optimized Buffer Flush/Fill (OBFF)
1707 */
1708 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1709 if (ret_val)
1710 return ret_val;
1711 }
1712
1713 /* Clear link partner's EEE ability */
1714 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1715
1716 /* FEXTNVM6 K1-off workaround */
1717 if (hw->mac.type == e1000_pch_spt) {
1718 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1719 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1720
1721 if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1722 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1723 else
1724 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1725
1726 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1727 }
1728
1729 if (!link)
1730 return E1000_SUCCESS; /* No link detected */
1731
1732 mac->get_link_status = FALSE;
1733
1734 switch (hw->mac.type) {
1735 case e1000_pch2lan:
1736 ret_val = e1000_k1_workaround_lv(hw);
1737 if (ret_val)
1738 return ret_val;
1739 /* fall-thru */
1740 case e1000_pchlan:
1741 if (hw->phy.type == e1000_phy_82578) {
1742 ret_val = e1000_link_stall_workaround_hv(hw);
1743 if (ret_val)
1744 return ret_val;
1745 }
1746
1747 /* Workaround for PCHx parts in half-duplex:
1748 * Set the number of preambles removed from the packet
1749 * when it is passed from the PHY to the MAC to prevent
1750 * the MAC from misinterpreting the packet type.
1751 */
1752 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1753 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1754
1755 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1756 E1000_STATUS_FD)
1757 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1758
1759 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1760 break;
1761 default:
1762 break;
1763 }
1764
1765 /* Check if there was DownShift, must be checked
1766 * immediately after link-up
1767 */
1768 e1000_check_downshift_generic(hw);
1769
1770 /* Enable/Disable EEE after link up */
1771 if (hw->phy.type > e1000_phy_82579) {
1772 ret_val = e1000_set_eee_pchlan(hw);
1773 if (ret_val)
1774 return ret_val;
1775 }
1776
1777 /* If we are forcing speed/duplex, then we simply return since
1778 * we have already determined whether we have link or not.
1779 */
1780 if (!mac->autoneg)
1781 return -E1000_ERR_CONFIG;
1782
1783 /* Auto-Neg is enabled. Auto Speed Detection takes care
1784 * of MAC speed/duplex configuration. So we only need to
1785 * configure Collision Distance in the MAC.
1786 */
1787 mac->ops.config_collision_dist(hw);
1788
1789 /* Configure Flow Control now that Auto-Neg has completed.
1790 * First, we need to restore the desired flow control
1791 * settings because we may have had to re-autoneg with a
1792 * different link partner.
1793 */
1794 ret_val = e1000_config_fc_after_link_up_generic(hw);
1795 if (ret_val)
1796 DEBUGOUT("Error configuring flow control\n");
1797
1798 return ret_val;
1799 }
1800
1801 /**
1802 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1803 * @hw: pointer to the HW structure
1804 *
1805 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1806 **/
1807 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1808 {
1809 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1810
1811 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1812 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1813 switch (hw->mac.type) {
1814 case e1000_ich8lan:
1815 case e1000_ich9lan:
1816 case e1000_ich10lan:
1817 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1818 break;
1819 case e1000_pchlan:
1820 case e1000_pch2lan:
1821 case e1000_pch_lpt:
1822 case e1000_pch_spt:
1823 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1824 break;
1825 default:
1826 break;
1827 }
1828 }
1829
1830 /**
1831 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1832 * @hw: pointer to the HW structure
1833 *
1834 * Acquires the mutex for performing NVM operations.
1835 **/
1836 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1837 {
1838 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1839
1840 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1841
1842 return E1000_SUCCESS;
1843 }
1844
1845 /**
1846 * e1000_release_nvm_ich8lan - Release NVM mutex
1847 * @hw: pointer to the HW structure
1848 *
1849 * Releases the mutex used while performing NVM operations.
1850 **/
1851 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1852 {
1853 DEBUGFUNC("e1000_release_nvm_ich8lan");
1854
1855 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1856
1857 return;
1858 }
1859
1860 /**
1861 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1862 * @hw: pointer to the HW structure
1863 *
1864 * Acquires the software control flag for performing PHY and select
1865 * MAC CSR accesses.
1866 **/
1867 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1868 {
1869 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1870 s32 ret_val = E1000_SUCCESS;
1871
1872 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1873
1874 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1875
1876 while (timeout) {
1877 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1878 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1879 break;
1880
1881 msec_delay_irq(1);
1882 timeout--;
1883 }
1884
1885 if (!timeout) {
1886 DEBUGOUT("SW has already locked the resource.\n");
1887 ret_val = -E1000_ERR_CONFIG;
1888 goto out;
1889 }
1890
1891 timeout = SW_FLAG_TIMEOUT;
1892
1893 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1894 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1895
1896 while (timeout) {
1897 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1898 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1899 break;
1900
1901 msec_delay_irq(1);
1902 timeout--;
1903 }
1904
1905 if (!timeout) {
1906 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1907 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1908 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1909 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1910 ret_val = -E1000_ERR_CONFIG;
1911 goto out;
1912 }
1913
1914 out:
1915 if (ret_val)
1916 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1917
1918 return ret_val;
1919 }
1920
1921 /**
1922 * e1000_release_swflag_ich8lan - Release software control flag
1923 * @hw: pointer to the HW structure
1924 *
1925 * Releases the software control flag for performing PHY and select
1926 * MAC CSR accesses.
1927 **/
1928 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1929 {
1930 u32 extcnf_ctrl;
1931
1932 DEBUGFUNC("e1000_release_swflag_ich8lan");
1933
1934 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1935
1936 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1937 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1938 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1939 } else {
1940 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1941 }
1942
1943 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1944
1945 return;
1946 }
1947
1948 /**
1949 * e1000_check_mng_mode_ich8lan - Checks management mode
1950 * @hw: pointer to the HW structure
1951 *
1952 * This checks if the adapter has any manageability enabled.
1953 * This is a function pointer entry point only called by read/write
1954 * routines for the PHY and NVM parts.
1955 **/
1956 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1957 {
1958 u32 fwsm;
1959
1960 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1961
1962 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1963
1964 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1965 ((fwsm & E1000_FWSM_MODE_MASK) ==
1966 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1967 }
1968
1969 /**
1970 * e1000_check_mng_mode_pchlan - Checks management mode
1971 * @hw: pointer to the HW structure
1972 *
1973 * This checks if the adapter has iAMT enabled.
1974 * This is a function pointer entry point only called by read/write
1975 * routines for the PHY and NVM parts.
1976 **/
1977 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1978 {
1979 u32 fwsm;
1980
1981 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1982
1983 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1984
1985 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1986 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1987 }
1988
1989 /**
1990 * e1000_rar_set_pch2lan - Set receive address register
1991 * @hw: pointer to the HW structure
1992 * @addr: pointer to the receive address
1993 * @index: receive address array register
1994 *
1995 * Sets the receive address array register at index to the address passed
1996 * in by addr. For 82579, RAR[0] is the base address register that is to
1997 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1998 * Use SHRA[0-3] in place of those reserved for ME.
1999 **/
2000 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2001 {
2002 u32 rar_low, rar_high;
2003
2004 DEBUGFUNC("e1000_rar_set_pch2lan");
2005
2006 /* HW expects these in little endian so we reverse the byte order
2007 * from network order (big endian) to little endian
2008 */
2009 rar_low = ((u32) addr[0] |
2010 ((u32) addr[1] << 8) |
2011 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2012
2013 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
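/* Illustrative example (hypothetical address): for the MAC
 * 00:1B:21:3A:4C:5D, addr[0]=0x00 through addr[5]=0x5D, giving
 * rar_low = 0x3A211B00 and rar_high = 0x00005D4C before the AV
 * (Address Valid) bit is OR'd in below.
 */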
2014
2015 /* If MAC address zero, no need to set the AV bit */
2016 if (rar_low || rar_high)
2017 rar_high |= E1000_RAH_AV;
2018
2019 if (index == 0) {
2020 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2021 E1000_WRITE_FLUSH(hw);
2022 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2023 E1000_WRITE_FLUSH(hw);
2024 return E1000_SUCCESS;
2025 }
2026
2027 /* RAR[1-6] are owned by manageability. Skip those and program the
2028 * next address into the SHRA register array.
2029 */
2030 if (index < (u32) (hw->mac.rar_entry_count)) {
2031 s32 ret_val;
2032
2033 ret_val = e1000_acquire_swflag_ich8lan(hw);
2034 if (ret_val)
2035 goto out;
2036
2037 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2038 E1000_WRITE_FLUSH(hw);
2039 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2040 E1000_WRITE_FLUSH(hw);
2041
2042 e1000_release_swflag_ich8lan(hw);
2043
2044 /* verify the register updates */
2045 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2046 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2047 return E1000_SUCCESS;
2048
2049 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2050 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2051 }
2052
2053 out:
2054 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2055 return -E1000_ERR_CONFIG;
2056 }
2057
2058 /**
2059 * e1000_rar_set_pch_lpt - Set receive address registers
2060 * @hw: pointer to the HW structure
2061 * @addr: pointer to the receive address
2062 * @index: receive address array register
2063 *
2064 * Sets the receive address register array at index to the address passed
2065 * in by addr. For LPT, RAR[0] is the base address register that is to
2066 * contain the MAC address. SHRA[0-10] are the shared receive address
2067 * registers that are shared between the Host and manageability engine (ME).
2068 **/
2069 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2070 {
2071 u32 rar_low, rar_high;
2072 u32 wlock_mac;
2073
2074 DEBUGFUNC("e1000_rar_set_pch_lpt");
2075
2076 /* HW expects these in little endian so we reverse the byte order
2077 * from network order (big endian) to little endian
2078 */
2079 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2080 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2081
2082 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2083
2084 /* If MAC address zero, no need to set the AV bit */
2085 if (rar_low || rar_high)
2086 rar_high |= E1000_RAH_AV;
2087
2088 if (index == 0) {
2089 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2090 E1000_WRITE_FLUSH(hw);
2091 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2092 E1000_WRITE_FLUSH(hw);
2093 return E1000_SUCCESS;
2094 }
2095
2096 /* The manageability engine (ME) can lock certain SHRAR registers that
2097 * it is using - those registers are unavailable for use.
2098 */
2099 if (index < hw->mac.rar_entry_count) {
2100 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2101 E1000_FWSM_WLOCK_MAC_MASK;
2102 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
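/* FWSM.WLOCK_MAC encodes ME ownership of the SHRA array: 0 means no
 * SHRAR is locked, 1 means all of them are, and a larger value n leaves
 * SHRAR[0..n-1] writable by the host (hence the index <= wlock_mac test
 * below).
 */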
2103
2104 /* Check if all SHRAR registers are locked */
2105 if (wlock_mac == 1)
2106 goto out;
2107
2108 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2109 s32 ret_val;
2110
2111 ret_val = e1000_acquire_swflag_ich8lan(hw);
2112
2113 if (ret_val)
2114 goto out;
2115
2116 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2117 rar_low);
2118 E1000_WRITE_FLUSH(hw);
2119 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2120 rar_high);
2121 E1000_WRITE_FLUSH(hw);
2122
2123 e1000_release_swflag_ich8lan(hw);
2124
2125 /* verify the register updates */
2126 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2127 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2128 return E1000_SUCCESS;
2129 }
2130 }
2131
2132 out:
2133 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2134 return -E1000_ERR_CONFIG;
2135 }
2136
2137 /**
2138 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2139 * @hw: pointer to the HW structure
2140 * @mc_addr_list: array of multicast addresses to program
2141 * @mc_addr_count: number of multicast addresses to program
2142 *
2143 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2144 * The caller must have a packed mc_addr_list of multicast addresses.
2145 **/
2146 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2147 u8 *mc_addr_list,
2148 u32 mc_addr_count)
2149 {
2150 u16 phy_reg = 0;
2151 int i;
2152 s32 ret_val;
2153
2154 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2155
2156 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2157
2158 ret_val = hw->phy.ops.acquire(hw);
2159 if (ret_val)
2160 return;
2161
2162 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2163 if (ret_val)
2164 goto release;
2165
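/* Mirror each 32-bit MTA entry into a pair of 16-bit BM wakeup
 * registers: the low word at BM_MTA(i), the high word at BM_MTA(i) + 1.
 */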
2166 for (i = 0; i < hw->mac.mta_reg_count; i++) {
2167 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2168 (u16)(hw->mac.mta_shadow[i] &
2169 0xFFFF));
2170 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2171 (u16)((hw->mac.mta_shadow[i] >> 16) &
2172 0xFFFF));
2173 }
2174
2175 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2176
2177 release:
2178 hw->phy.ops.release(hw);
2179 }
2180
2181 /**
2182 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2183 * @hw: pointer to the HW structure
2184 *
2185 * Checks if firmware is blocking the reset of the PHY.
2186 * This is a function pointer entry point only called by
2187 * reset routines.
2188 **/
2189 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2190 {
2191 u32 fwsm;
2192 bool blocked = FALSE;
2193 int i = 0;
2194
2195 DEBUGFUNC("e1000_check_reset_block_ich8lan");
2196
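/* Poll for up to ~300 ms (30 iterations x 10 ms) for firmware to
 * deassert the PHY reset block before reporting the reset as blocked.
 */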
2197 do {
2198 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2199 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2200 blocked = TRUE;
2201 msec_delay(10);
2202 continue;
2203 }
2204 blocked = FALSE;
2205 } while (blocked && (i++ < 30));
2206 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2207 }
2208
2209 /**
2210 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2211 * @hw: pointer to the HW structure
2212 *
2213 * Assumes semaphore already acquired.
2214 *
2215 **/
2216 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2217 {
2218 u16 phy_data;
2219 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2220 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2221 E1000_STRAP_SMT_FREQ_SHIFT;
2222 s32 ret_val;
2223
2224 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2225
2226 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2227 if (ret_val)
2228 return ret_val;
2229
2230 phy_data &= ~HV_SMB_ADDR_MASK;
2231 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2232 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2233
2234 if (hw->phy.type == e1000_phy_i217) {
2235 /* Restore SMBus frequency */
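/* A strap value of 0 means no frequency was strapped; otherwise the
 * two-bit value (minus one, via the post-decrement test) is split
 * across the low and high frequency bits of HV_SMB_ADDR.
 */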
2236 if (freq--) {
2237 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2238 phy_data |= (freq & (1 << 0)) <<
2239 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2240 phy_data |= (freq & (1 << 1)) <<
2241 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2242 } else {
2243 DEBUGOUT("Unsupported SMB frequency in PHY\n");
2244 }
2245 }
2246
2247 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2248 }
2249
2250 /**
2251 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2252 * @hw: pointer to the HW structure
2253 *
2254 * SW should configure the LCD from the NVM extended configuration region
2255 * as a workaround for certain parts.
2256 **/
2257 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2258 {
2259 struct e1000_phy_info *phy = &hw->phy;
2260 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2261 s32 ret_val = E1000_SUCCESS;
2262 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2263
2264 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2265
2266 /* Initialize the PHY from the NVM on ICH platforms. This
2267 * is needed due to an issue where the NVM configuration is
2268 * not properly autoloaded after power transitions.
2269 * Therefore, after each PHY reset, we will load the
2270 * configuration data out of the NVM manually.
2271 */
2272 switch (hw->mac.type) {
2273 case e1000_ich8lan:
2274 if (phy->type != e1000_phy_igp_3)
2275 return ret_val;
2276
2277 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2278 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2279 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2280 break;
2281 }
2282 /* Fall-thru */
2283 case e1000_pchlan:
2284 case e1000_pch2lan:
2285 case e1000_pch_lpt:
2286 case e1000_pch_spt:
2287 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2288 break;
2289 default:
2290 return ret_val;
2291 }
2292
2293 ret_val = hw->phy.ops.acquire(hw);
2294 if (ret_val)
2295 return ret_val;
2296
2297 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2298 if (!(data & sw_cfg_mask))
2299 goto release;
2300
2301 /* Make sure HW does not configure LCD from PHY
2302 * extended configuration before SW configuration
2303 */
2304 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2305 if ((hw->mac.type < e1000_pch2lan) &&
2306 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2307 goto release;
2308
2309 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2310 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2311 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2312 if (!cnf_size)
2313 goto release;
2314
2315 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2316 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2317
2318 if (((hw->mac.type == e1000_pchlan) &&
2319 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2320 (hw->mac.type > e1000_pchlan)) {
2321 /* HW configures the SMBus address and LEDs when the
2322 * OEM and LCD Write Enable bits are set in the NVM.
2323 * When both NVM bits are cleared, SW will configure
2324 * them instead.
2325 */
2326 ret_val = e1000_write_smbus_addr(hw);
2327 if (ret_val)
2328 goto release;
2329
2330 data = E1000_READ_REG(hw, E1000_LEDCTL);
2331 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2332 (u16)data);
2333 if (ret_val)
2334 goto release;
2335 }
2336
2337 /* Configure LCD from extended configuration region. */
2338
2339 /* cnf_base_addr is in DWORD */
2340 word_addr = (u16)(cnf_base_addr << 1);
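/* The shift above converts the DWORD-granular extended-config pointer
 * into an NVM word address; each entry read below is a word pair (data
 * first, then the target PHY register address), hence the i * 2 stride.
 */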
2341
2342 for (i = 0; i < cnf_size; i++) {
2343 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2344 &reg_data);
2345 if (ret_val)
2346 goto release;
2347
2348 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2349 1, &reg_addr);
2350 if (ret_val)
2351 goto release;
2352
2353 /* Save off the PHY page for future writes. */
2354 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2355 phy_page = reg_data;
2356 continue;
2357 }
2358
2359 reg_addr &= PHY_REG_MASK;
2360 reg_addr |= phy_page;
2361
2362 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2363 reg_data);
2364 if (ret_val)
2365 goto release;
2366 }
2367
2368 release:
2369 hw->phy.ops.release(hw);
2370 return ret_val;
2371 }
2372
2373 /**
2374 * e1000_k1_gig_workaround_hv - K1 Si workaround
2375 * @hw: pointer to the HW structure
2376 * @link: link up bool flag
2377 *
2378 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2379  * from a lower speed. This workaround disables K1 whenever link is at 1Gbps.
2380 * If link is down, the function will restore the default K1 setting located
2381 * in the NVM.
2382 **/
2383 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2384 {
2385 s32 ret_val = E1000_SUCCESS;
2386 u16 status_reg = 0;
2387 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2388
2389 DEBUGFUNC("e1000_k1_gig_workaround_hv");
2390
2391 if (hw->mac.type != e1000_pchlan)
2392 return E1000_SUCCESS;
2393
2394 /* Wrap the whole flow with the sw flag */
2395 ret_val = hw->phy.ops.acquire(hw);
2396 if (ret_val)
2397 return ret_val;
2398
2399 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2400 if (link) {
2401 if (hw->phy.type == e1000_phy_82578) {
2402 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2403 &status_reg);
2404 if (ret_val)
2405 goto release;
2406
2407 status_reg &= (BM_CS_STATUS_LINK_UP |
2408 BM_CS_STATUS_RESOLVED |
2409 BM_CS_STATUS_SPEED_MASK);
2410
2411 if (status_reg == (BM_CS_STATUS_LINK_UP |
2412 BM_CS_STATUS_RESOLVED |
2413 BM_CS_STATUS_SPEED_1000))
2414 k1_enable = FALSE;
2415 }
2416
2417 if (hw->phy.type == e1000_phy_82577) {
2418 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2419 &status_reg);
2420 if (ret_val)
2421 goto release;
2422
2423 status_reg &= (HV_M_STATUS_LINK_UP |
2424 HV_M_STATUS_AUTONEG_COMPLETE |
2425 HV_M_STATUS_SPEED_MASK);
2426
2427 if (status_reg == (HV_M_STATUS_LINK_UP |
2428 HV_M_STATUS_AUTONEG_COMPLETE |
2429 HV_M_STATUS_SPEED_1000))
2430 k1_enable = FALSE;
2431 }
2432
2433 /* Link stall fix for link up */
2434 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2435 0x0100);
2436 if (ret_val)
2437 goto release;
2438
2439 } else {
2440 /* Link stall fix for link down */
2441 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2442 0x4100);
2443 if (ret_val)
2444 goto release;
2445 }
2446
2447 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2448
2449 release:
2450 hw->phy.ops.release(hw);
2451
2452 return ret_val;
2453 }
2454
2455 /**
2456 * e1000_configure_k1_ich8lan - Configure K1 power state
2457 * @hw: pointer to the HW structure
2458 * @enable: K1 state to configure
2459 *
2460 * Configure the K1 power state based on the provided parameter.
2461 * Assumes semaphore already acquired.
2462 *
2463 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2464 **/
2465 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2466 {
2467 s32 ret_val;
2468 u32 ctrl_reg = 0;
2469 u32 ctrl_ext = 0;
2470 u32 reg = 0;
2471 u16 kmrn_reg = 0;
2472
2473 DEBUGFUNC("e1000_configure_k1_ich8lan");
2474
2475 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2476 &kmrn_reg);
2477 if (ret_val)
2478 return ret_val;
2479
2480 if (k1_enable)
2481 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2482 else
2483 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2484
2485 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2486 kmrn_reg);
2487 if (ret_val)
2488 return ret_val;
2489
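/* Briefly force the MAC speed configuration (FRCSPD with the speed bits
 * cleared, plus speed bypass) so the K1 change just written over KMRN
 * takes effect, then restore the original CTRL/CTRL_EXT values.
 */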
2490 usec_delay(20);
2491 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2492 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2493
2494 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2495 reg |= E1000_CTRL_FRCSPD;
2496 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2497
2498 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2499 E1000_WRITE_FLUSH(hw);
2500 usec_delay(20);
2501 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2502 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2503 E1000_WRITE_FLUSH(hw);
2504 usec_delay(20);
2505
2506 return E1000_SUCCESS;
2507 }
2508
2509 /**
2510 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2511 * @hw: pointer to the HW structure
2512 * @d0_state: boolean if entering d0 or d3 device state
2513 *
2514 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2515 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2516 * in NVM determines whether HW should configure LPLU and Gbe Disable.
2517 **/
2518 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2519 {
2520 s32 ret_val = 0;
2521 u32 mac_reg;
2522 u16 oem_reg;
2523
2524 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2525
2526 if (hw->mac.type < e1000_pchlan)
2527 return ret_val;
2528
2529 ret_val = hw->phy.ops.acquire(hw);
2530 if (ret_val)
2531 return ret_val;
2532
2533 if (hw->mac.type == e1000_pchlan) {
2534 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2535 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2536 goto release;
2537 }
2538
2539 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2540 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2541 goto release;
2542
2543 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2544
2545 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2546 if (ret_val)
2547 goto release;
2548
2549 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2550
2551 if (d0_state) {
2552 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2553 oem_reg |= HV_OEM_BITS_GBE_DIS;
2554
2555 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2556 oem_reg |= HV_OEM_BITS_LPLU;
2557 } else {
2558 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2559 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2560 oem_reg |= HV_OEM_BITS_GBE_DIS;
2561
2562 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2563 E1000_PHY_CTRL_NOND0A_LPLU))
2564 oem_reg |= HV_OEM_BITS_LPLU;
2565 }
2566
2567 /* Set Restart auto-neg to activate the bits */
2568 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2569 !hw->phy.ops.check_reset_block(hw))
2570 oem_reg |= HV_OEM_BITS_RESTART_AN;
2571
2572 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2573
2574 release:
2575 hw->phy.ops.release(hw);
2576
2577 return ret_val;
2578 }
2579
2580
2581 /**
2582 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2583 * @hw: pointer to the HW structure
2584 **/
2585 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2586 {
2587 s32 ret_val;
2588 u16 data;
2589
2590 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2591
2592 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2593 if (ret_val)
2594 return ret_val;
2595
2596 data |= HV_KMRN_MDIO_SLOW;
2597
2598 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2599
2600 return ret_val;
2601 }
2602
2603 /**
2604  * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2605 * done after every PHY reset.
2606 **/
2607 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2608 {
2609 s32 ret_val = E1000_SUCCESS;
2610 u16 phy_data;
2611
2612 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2613
2614 if (hw->mac.type != e1000_pchlan)
2615 return E1000_SUCCESS;
2616
2617 /* Set MDIO slow mode before any other MDIO access */
2618 if (hw->phy.type == e1000_phy_82577) {
2619 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2620 if (ret_val)
2621 return ret_val;
2622 }
2623
2624 if (((hw->phy.type == e1000_phy_82577) &&
2625 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2626 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2627 /* Disable generation of early preamble */
2628 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2629 if (ret_val)
2630 return ret_val;
2631
2632 /* Preamble tuning for SSC */
2633 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2634 0xA204);
2635 if (ret_val)
2636 return ret_val;
2637 }
2638
2639 if (hw->phy.type == e1000_phy_82578) {
2640 /* Return registers to default by doing a soft reset then
2641 * writing 0x3140 to the control register.
2642 */
2643 if (hw->phy.revision < 2) {
2644 e1000_phy_sw_reset_generic(hw);
2645 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2646 0x3140);
2647 }
2648 }
2649
2650 /* Select page 0 */
2651 ret_val = hw->phy.ops.acquire(hw);
2652 if (ret_val)
2653 return ret_val;
2654
2655 hw->phy.addr = 1;
2656 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2657 hw->phy.ops.release(hw);
2658 if (ret_val)
2659 return ret_val;
2660
2661 /* Configure the K1 Si workaround during phy reset assuming there is
2662  * link so that it disables K1 if link is at 1Gbps.
2663 */
2664 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2665 if (ret_val)
2666 return ret_val;
2667
2668 /* Workaround for link disconnects on a busy hub in half duplex */
2669 ret_val = hw->phy.ops.acquire(hw);
2670 if (ret_val)
2671 return ret_val;
2672 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2673 if (ret_val)
2674 goto release;
2675 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2676 phy_data & 0x00FF);
2677 if (ret_val)
2678 goto release;
2679
2680 /* set MSE higher to enable link to stay up when noise is high */
2681 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2682 release:
2683 hw->phy.ops.release(hw);
2684
2685 return ret_val;
2686 }
2687
2688 /**
2689 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2690 * @hw: pointer to the HW structure
2691 **/
2692 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2693 {
2694 u32 mac_reg;
2695 u16 i, phy_reg = 0;
2696 s32 ret_val;
2697
2698 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2699
2700 ret_val = hw->phy.ops.acquire(hw);
2701 if (ret_val)
2702 return;
2703 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2704 if (ret_val)
2705 goto release;
2706
2707 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2708 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2709 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2710 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2711 (u16)(mac_reg & 0xFFFF));
2712 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2713 (u16)((mac_reg >> 16) & 0xFFFF));
2714
2715 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2716 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2717 (u16)(mac_reg & 0xFFFF));
2718 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2719 (u16)((mac_reg & E1000_RAH_AV)
2720 >> 16));
2721 }
2722
2723 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2724
2725 release:
2726 hw->phy.ops.release(hw);
2727 }
2728
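/**
 * e1000_calc_rx_da_crc - Calculate the 802.3 CRC-32 of a destination address
 * @mac: 6-byte MAC address
 *
 * Bit-reflected CRC-32 (polynomial 0xEDB88320, the reflection of the 802.3
 * polynomial 0x04C11DB7) with initial value 0xFFFFFFFF and a final
 * complement - the same FCS algorithm Ethernet uses, applied to the six
 * address bytes. The "(crc & 1) * (-1)" step builds an all-ones mask when
 * the CRC LSB is set, so the polynomial is folded in without a branch.
 **/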
2729 static u32 e1000_calc_rx_da_crc(u8 mac[])
2730 {
2731 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2732 u32 i, j, mask, crc;
2733
2734 DEBUGFUNC("e1000_calc_rx_da_crc");
2735
2736 crc = 0xffffffff;
2737 for (i = 0; i < 6; i++) {
2738 crc = crc ^ mac[i];
2739 for (j = 8; j > 0; j--) {
2740 mask = (crc & 1) * (-1);
2741 crc = (crc >> 1) ^ (poly & mask);
2742 }
2743 }
2744 return ~crc;
2745 }
2746
2747 /**
2748 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2749 * with 82579 PHY
2750 * @hw: pointer to the HW structure
2751 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2752 **/
2753 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2754 {
2755 s32 ret_val = E1000_SUCCESS;
2756 u16 phy_reg, data;
2757 u32 mac_reg;
2758 u16 i;
2759
2760 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2761
2762 if (hw->mac.type < e1000_pch2lan)
2763 return E1000_SUCCESS;
2764
2765 /* disable Rx path while enabling/disabling workaround */
2766 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2767 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2768 phy_reg | (1 << 14));
2769 if (ret_val)
2770 return ret_val;
2771
2772 if (enable) {
2773 /* Write Rx addresses (rar_entry_count for RAL/H, and
2774 * SHRAL/H) and initial CRC values to the MAC
2775 */
2776 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2777 u8 mac_addr[ETH_ADDR_LEN] = {0};
2778 u32 addr_high, addr_low;
2779
2780 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2781 if (!(addr_high & E1000_RAH_AV))
2782 continue;
2783 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2784 mac_addr[0] = (addr_low & 0xFF);
2785 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2786 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2787 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2788 mac_addr[4] = (addr_high & 0xFF);
2789 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2790
2791 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2792 e1000_calc_rx_da_crc(mac_addr));
2793 }
2794
2795 /* Write Rx addresses to the PHY */
2796 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2797
2798 /* Enable jumbo frame workaround in the MAC */
2799 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2800 mac_reg &= ~(1 << 14);
2801 mac_reg |= (7 << 15);
2802 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2803
2804 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2805 mac_reg |= E1000_RCTL_SECRC;
2806 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2807
2808 ret_val = e1000_read_kmrn_reg_generic(hw,
2809 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2810 &data);
2811 if (ret_val)
2812 return ret_val;
2813 ret_val = e1000_write_kmrn_reg_generic(hw,
2814 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2815 data | (1 << 0));
2816 if (ret_val)
2817 return ret_val;
2818 ret_val = e1000_read_kmrn_reg_generic(hw,
2819 E1000_KMRNCTRLSTA_HD_CTRL,
2820 &data);
2821 if (ret_val)
2822 return ret_val;
2823 data &= ~(0xF << 8);
2824 data |= (0xB << 8);
2825 ret_val = e1000_write_kmrn_reg_generic(hw,
2826 E1000_KMRNCTRLSTA_HD_CTRL,
2827 data);
2828 if (ret_val)
2829 return ret_val;
2830
2831 /* Enable jumbo frame workaround in the PHY */
2832 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2833 data &= ~(0x7F << 5);
2834 data |= (0x37 << 5);
2835 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2836 if (ret_val)
2837 return ret_val;
2838 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2839 data &= ~(1 << 13);
2840 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2841 if (ret_val)
2842 return ret_val;
2843 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2844 data &= ~(0x3FF << 2);
2845 data |= (E1000_TX_PTR_GAP << 2);
2846 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2847 if (ret_val)
2848 return ret_val;
2849 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2850 if (ret_val)
2851 return ret_val;
2852 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2853 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2854 (1 << 10));
2855 if (ret_val)
2856 return ret_val;
2857 } else {
2858 /* Write MAC register values back to h/w defaults */
2859 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2860 mac_reg &= ~(0xF << 14);
2861 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2862
2863 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2864 mac_reg &= ~E1000_RCTL_SECRC;
2865 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2866
2867 ret_val = e1000_read_kmrn_reg_generic(hw,
2868 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2869 &data);
2870 if (ret_val)
2871 return ret_val;
2872 ret_val = e1000_write_kmrn_reg_generic(hw,
2873 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2874 data & ~(1 << 0));
2875 if (ret_val)
2876 return ret_val;
2877 ret_val = e1000_read_kmrn_reg_generic(hw,
2878 E1000_KMRNCTRLSTA_HD_CTRL,
2879 &data);
2880 if (ret_val)
2881 return ret_val;
2882 data &= ~(0xF << 8);
2883 data |= (0xB << 8);
2884 ret_val = e1000_write_kmrn_reg_generic(hw,
2885 E1000_KMRNCTRLSTA_HD_CTRL,
2886 data);
2887 if (ret_val)
2888 return ret_val;
2889
2890 /* Write PHY register values back to h/w defaults */
2891 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2892 data &= ~(0x7F << 5);
2893 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2894 if (ret_val)
2895 return ret_val;
2896 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2897 data |= (1 << 13);
2898 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2899 if (ret_val)
2900 return ret_val;
2901 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2902 data &= ~(0x3FF << 2);
2903 data |= (0x8 << 2);
2904 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2905 if (ret_val)
2906 return ret_val;
2907 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2908 if (ret_val)
2909 return ret_val;
2910 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2911 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2912 ~(1 << 10));
2913 if (ret_val)
2914 return ret_val;
2915 }
2916
2917 /* re-enable Rx path after enabling/disabling workaround */
2918 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2919 ~(1 << 14));
2920 }
2921
2922 /**
2923  * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2924 * done after every PHY reset.
2925 **/
2926 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2927 {
2928 s32 ret_val = E1000_SUCCESS;
2929
2930 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2931
2932 if (hw->mac.type != e1000_pch2lan)
2933 return E1000_SUCCESS;
2934
2935 /* Set MDIO slow mode before any other MDIO access */
2936 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2937 if (ret_val)
2938 return ret_val;
2939
2940 ret_val = hw->phy.ops.acquire(hw);
2941 if (ret_val)
2942 return ret_val;
2943 /* set MSE higher to enable link to stay up when noise is high */
2944 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2945 if (ret_val)
2946 goto release;
2947 /* drop link after 5 times MSE threshold was reached */
2948 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2949 release:
2950 hw->phy.ops.release(hw);
2951
2952 return ret_val;
2953 }
2954
2955 /**
2956  * e1000_k1_workaround_lv - K1 Si workaround
2957  * @hw: pointer to the HW structure
2958  *
2959  * Workaround to set the K1 beacon duration to 16 usec for 82579 parts at
2960  * 10Mbps; disable K1 for 1000 and 100 speeds.
2961 **/
2962 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2963 {
2964 s32 ret_val = E1000_SUCCESS;
2965 u16 status_reg = 0;
2966
2967 DEBUGFUNC("e1000_k1_workaround_lv");
2968
2969 if (hw->mac.type != e1000_pch2lan)
2970 return E1000_SUCCESS;
2971
2972 /* Set K1 beacon duration based on 10Mbps speed */
2973 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2974 if (ret_val)
2975 return ret_val;
2976
2977 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2978 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2979 if (status_reg &
2980 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2981 u16 pm_phy_reg;
2982
2983 /* LV 1G/100 packet drop issue workaround */
2984 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2985 &pm_phy_reg);
2986 if (ret_val)
2987 return ret_val;
2988 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2989 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2990 pm_phy_reg);
2991 if (ret_val)
2992 return ret_val;
2993 } else {
2994 u32 mac_reg;
2995 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2996 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2997 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2998 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2999 }
3000 }
3001
3002 return ret_val;
3003 }
3004
3005 /**
3006 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3007 * @hw: pointer to the HW structure
3008 * @gate: boolean set to TRUE to gate, FALSE to ungate
3009 *
3010 * Gate/ungate the automatic PHY configuration via hardware; perform
3011 * the configuration via software instead.
3012 **/
3013 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3014 {
3015 u32 extcnf_ctrl;
3016
3017 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3018
3019 if (hw->mac.type < e1000_pch2lan)
3020 return;
3021
3022 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3023
3024 if (gate)
3025 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3026 else
3027 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3028
3029 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3030 }
3031
3032 /**
3033 * e1000_lan_init_done_ich8lan - Check for PHY config completion
3034 * @hw: pointer to the HW structure
3035 *
3036  * Check the appropriate indication that the MAC has finished configuring the
3037 * PHY after a software reset.
3038 **/
3039 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3040 {
3041 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3042
3043 DEBUGFUNC("e1000_lan_init_done_ich8lan");
3044
3045 /* Wait for basic configuration to complete before proceeding */
3046 do {
3047 data = E1000_READ_REG(hw, E1000_STATUS);
3048 data &= E1000_STATUS_LAN_INIT_DONE;
3049 usec_delay(100);
3050 } while ((!data) && --loop);
3051
3052 /* If basic configuration is incomplete before the above loop
3053 * count reaches 0, loading the configuration from NVM will
3054 * leave the PHY in a bad state possibly resulting in no link.
3055 */
3056 if (loop == 0)
3057 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3058
3059 /* Clear the Init Done bit for the next init event */
3060 data = E1000_READ_REG(hw, E1000_STATUS);
3061 data &= ~E1000_STATUS_LAN_INIT_DONE;
3062 E1000_WRITE_REG(hw, E1000_STATUS, data);
3063 }
3064
3065 /**
3066 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3067 * @hw: pointer to the HW structure
3068 **/
3069 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3070 {
3071 s32 ret_val = E1000_SUCCESS;
3072 u16 reg;
3073
3074 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3075
3076 if (hw->phy.ops.check_reset_block(hw))
3077 return E1000_SUCCESS;
3078
3079 /* Allow time for h/w to get to quiescent state after reset */
3080 msec_delay(10);
3081
3082 /* Perform any necessary post-reset workarounds */
3083 switch (hw->mac.type) {
3084 case e1000_pchlan:
3085 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3086 if (ret_val)
3087 return ret_val;
3088 break;
3089 case e1000_pch2lan:
3090 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3091 if (ret_val)
3092 return ret_val;
3093 break;
3094 default:
3095 break;
3096 }
3097
3098 /* Clear the host wakeup bit after lcd reset */
3099 if (hw->mac.type >= e1000_pchlan) {
3100 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3101 reg &= ~BM_WUC_HOST_WU_BIT;
3102 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3103 }
3104
3105 /* Configure the LCD with the extended configuration region in NVM */
3106 ret_val = e1000_sw_lcd_config_ich8lan(hw);
3107 if (ret_val)
3108 return ret_val;
3109
3110 /* Configure the LCD with the OEM bits in NVM */
3111 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3112
3113 if (hw->mac.type == e1000_pch2lan) {
3114 /* Ungate automatic PHY configuration on non-managed 82579 */
3115 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3116 E1000_ICH_FWSM_FW_VALID)) {
3117 msec_delay(10);
3118 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3119 }
3120
3121 /* Set EEE LPI Update Timer to 200usec */
3122 ret_val = hw->phy.ops.acquire(hw);
3123 if (ret_val)
3124 return ret_val;
3125 ret_val = e1000_write_emi_reg_locked(hw,
3126 I82579_LPI_UPDATE_TIMER,
3127 0x1387);
3128 hw->phy.ops.release(hw);
3129 }
3130
3131 return ret_val;
3132 }
3133
3134 /**
3135 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3136 * @hw: pointer to the HW structure
3137 *
3138 * Resets the PHY
3139 * This is a function pointer entry point called by drivers
3140 * or other shared routines.
3141 **/
3142 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3143 {
3144 s32 ret_val = E1000_SUCCESS;
3145
3146 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3147
3148 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3149 if ((hw->mac.type == e1000_pch2lan) &&
3150 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3151 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3152
3153 ret_val = e1000_phy_hw_reset_generic(hw);
3154 if (ret_val)
3155 return ret_val;
3156
3157 return e1000_post_phy_reset_ich8lan(hw);
3158 }
3159
3160 /**
3161 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3162 * @hw: pointer to the HW structure
3163 * @active: TRUE to enable LPLU, FALSE to disable
3164 *
3165  * Sets the LPLU state according to the active flag. For PCH, if the OEM
3166  * write bits are disabled in the NVM, writing the LPLU bits in the MAC will
3167  * not set the PHY speed. This function manually sets the LPLU bit and
3168  * restarts auto-neg as hw would do. D3 and D0 LPLU will call the same function
3169 * since it configures the same bit.
3170 **/
3171 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3172 {
3173 s32 ret_val;
3174 u16 oem_reg;
3175
3176 DEBUGFUNC("e1000_set_lplu_state_pchlan");
3177 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3178 if (ret_val)
3179 return ret_val;
3180
3181 if (active)
3182 oem_reg |= HV_OEM_BITS_LPLU;
3183 else
3184 oem_reg &= ~HV_OEM_BITS_LPLU;
3185
3186 if (!hw->phy.ops.check_reset_block(hw))
3187 oem_reg |= HV_OEM_BITS_RESTART_AN;
3188
3189 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3190 }
3191
3192 /**
3193 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3194 * @hw: pointer to the HW structure
3195 * @active: TRUE to enable LPLU, FALSE to disable
3196 *
3197 * Sets the LPLU D0 state according to the active flag. When
3198 * activating LPLU this function also disables smart speed
3199 * and vice versa. LPLU will not be activated unless the
3200 * device autonegotiation advertisement meets standards of
3201 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3202 * This is a function pointer entry point only called by
3203 * PHY setup routines.
3204 **/
3205 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3206 {
3207 struct e1000_phy_info *phy = &hw->phy;
3208 u32 phy_ctrl;
3209 s32 ret_val = E1000_SUCCESS;
3210 u16 data;
3211
3212 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3213
3214 if (phy->type == e1000_phy_ife)
3215 return E1000_SUCCESS;
3216
3217 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3218
3219 if (active) {
3220 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3221 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3222
3223 if (phy->type != e1000_phy_igp_3)
3224 return E1000_SUCCESS;
3225
3226 /* Call gig speed drop workaround on LPLU before accessing
3227 * any PHY registers
3228 */
3229 if (hw->mac.type == e1000_ich8lan)
3230 e1000_gig_downshift_workaround_ich8lan(hw);
3231
3232 /* When LPLU is enabled, we should disable SmartSpeed */
3233 ret_val = phy->ops.read_reg(hw,
3234 IGP01E1000_PHY_PORT_CONFIG,
3235 &data);
3236 if (ret_val)
3237 return ret_val;
3238 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3239 ret_val = phy->ops.write_reg(hw,
3240 IGP01E1000_PHY_PORT_CONFIG,
3241 data);
3242 if (ret_val)
3243 return ret_val;
3244 } else {
3245 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3246 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3247
3248 if (phy->type != e1000_phy_igp_3)
3249 return E1000_SUCCESS;
3250
3251 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3252 * during Dx states where the power conservation is most
3253 * important. During driver activity we should enable
3254 * SmartSpeed, so performance is maintained.
3255 */
3256 if (phy->smart_speed == e1000_smart_speed_on) {
3257 ret_val = phy->ops.read_reg(hw,
3258 IGP01E1000_PHY_PORT_CONFIG,
3259 &data);
3260 if (ret_val)
3261 return ret_val;
3262
3263 data |= IGP01E1000_PSCFR_SMART_SPEED;
3264 ret_val = phy->ops.write_reg(hw,
3265 IGP01E1000_PHY_PORT_CONFIG,
3266 data);
3267 if (ret_val)
3268 return ret_val;
3269 } else if (phy->smart_speed == e1000_smart_speed_off) {
3270 ret_val = phy->ops.read_reg(hw,
3271 IGP01E1000_PHY_PORT_CONFIG,
3272 &data);
3273 if (ret_val)
3274 return ret_val;
3275
3276 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3277 ret_val = phy->ops.write_reg(hw,
3278 IGP01E1000_PHY_PORT_CONFIG,
3279 data);
3280 if (ret_val)
3281 return ret_val;
3282 }
3283 }
3284
3285 return E1000_SUCCESS;
3286 }
3287
3288 /**
3289 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3290 * @hw: pointer to the HW structure
3291 * @active: TRUE to enable LPLU, FALSE to disable
3292 *
3293 * Sets the LPLU D3 state according to the active flag. When
3294 * activating LPLU this function also disables smart speed
3295 * and vice versa. LPLU will not be activated unless the
3296 * device autonegotiation advertisement meets standards of
3297 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3298 * This is a function pointer entry point only called by
3299 * PHY setup routines.
3300 **/
3301 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3302 {
3303 struct e1000_phy_info *phy = &hw->phy;
3304 u32 phy_ctrl;
3305 s32 ret_val = E1000_SUCCESS;
3306 u16 data;
3307
3308 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3309
3310 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3311
3312 if (!active) {
3313 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3314 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3315
3316 if (phy->type != e1000_phy_igp_3)
3317 return E1000_SUCCESS;
3318
3319 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3320 * during Dx states where the power conservation is most
3321 * important. During driver activity we should enable
3322 * SmartSpeed, so performance is maintained.
3323 */
3324 if (phy->smart_speed == e1000_smart_speed_on) {
3325 ret_val = phy->ops.read_reg(hw,
3326 IGP01E1000_PHY_PORT_CONFIG,
3327 &data);
3328 if (ret_val)
3329 return ret_val;
3330
3331 data |= IGP01E1000_PSCFR_SMART_SPEED;
3332 ret_val = phy->ops.write_reg(hw,
3333 IGP01E1000_PHY_PORT_CONFIG,
3334 data);
3335 if (ret_val)
3336 return ret_val;
3337 } else if (phy->smart_speed == e1000_smart_speed_off) {
3338 ret_val = phy->ops.read_reg(hw,
3339 IGP01E1000_PHY_PORT_CONFIG,
3340 &data);
3341 if (ret_val)
3342 return ret_val;
3343
3344 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3345 ret_val = phy->ops.write_reg(hw,
3346 IGP01E1000_PHY_PORT_CONFIG,
3347 data);
3348 if (ret_val)
3349 return ret_val;
3350 }
3351 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3352 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3353 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3354 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3355 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3356
3357 if (phy->type != e1000_phy_igp_3)
3358 return E1000_SUCCESS;
3359
3360 /* Call gig speed drop workaround on LPLU before accessing
3361 * any PHY registers
3362 */
3363 if (hw->mac.type == e1000_ich8lan)
3364 e1000_gig_downshift_workaround_ich8lan(hw);
3365
3366 /* When LPLU is enabled, we should disable SmartSpeed */
3367 ret_val = phy->ops.read_reg(hw,
3368 IGP01E1000_PHY_PORT_CONFIG,
3369 &data);
3370 if (ret_val)
3371 return ret_val;
3372
3373 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3374 ret_val = phy->ops.write_reg(hw,
3375 IGP01E1000_PHY_PORT_CONFIG,
3376 data);
3377 }
3378
3379 return ret_val;
3380 }
3381
3382 /**
3383 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3384 * @hw: pointer to the HW structure
3385 * @bank: pointer to the variable that returns the active bank
3386 *
3387 * Reads signature byte from the NVM using the flash access registers.
3388 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3389 **/
3390 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3391 {
3392 u32 eecd;
3393 struct e1000_nvm_info *nvm = &hw->nvm;
3394 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3395 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3396 u32 nvm_dword = 0;
3397 u8 sig_byte = 0;
3398 s32 ret_val;
3399
3400 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3401
3402 switch (hw->mac.type) {
3403 case e1000_pch_spt:
3404 bank1_offset = nvm->flash_bank_size;
3405 act_offset = E1000_ICH_NVM_SIG_WORD;
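/* The defaults above are byte offsets for the byte-wise reads used by
 * older parts (the signature is the high byte of word 0x13, hence
 * "* 2 + 1"); the SPT flash is read a dword at a time, so both values
 * are overridden here.
 */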
3406
3407 /* set bank to 0 in case flash read fails */
3408 *bank = 0;
3409
3410 /* Check bank 0 */
3411 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3412 &nvm_dword);
3413 if (ret_val)
3414 return ret_val;
3415 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3416 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3417 E1000_ICH_NVM_SIG_VALUE) {
3418 *bank = 0;
3419 return E1000_SUCCESS;
3420 }
3421
3422 /* Check bank 1 */
3423 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3424 bank1_offset,
3425 &nvm_dword);
3426 if (ret_val)
3427 return ret_val;
3428 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3429 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3430 E1000_ICH_NVM_SIG_VALUE) {
3431 *bank = 1;
3432 return E1000_SUCCESS;
3433 }
3434
3435 DEBUGOUT("ERROR: No valid NVM bank present\n");
3436 return -E1000_ERR_NVM;
3437 case e1000_ich8lan:
3438 case e1000_ich9lan:
3439 eecd = E1000_READ_REG(hw, E1000_EECD);
3440 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3441 E1000_EECD_SEC1VAL_VALID_MASK) {
3442 if (eecd & E1000_EECD_SEC1VAL)
3443 *bank = 1;
3444 else
3445 *bank = 0;
3446
3447 return E1000_SUCCESS;
3448 }
3449 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3450 /* fall-thru */
3451 default:
3452 /* set bank to 0 in case flash read fails */
3453 *bank = 0;
3454
3455 /* Check bank 0 */
3456 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3457 &sig_byte);
3458 if (ret_val)
3459 return ret_val;
3460 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3461 E1000_ICH_NVM_SIG_VALUE) {
3462 *bank = 0;
3463 return E1000_SUCCESS;
3464 }
3465
3466 /* Check bank 1 */
3467 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3468 bank1_offset,
3469 &sig_byte);
3470 if (ret_val)
3471 return ret_val;
3472 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3473 E1000_ICH_NVM_SIG_VALUE) {
3474 *bank = 1;
3475 return E1000_SUCCESS;
3476 }
3477
3478 DEBUGOUT("ERROR: No valid NVM bank present\n");
3479 return -E1000_ERR_NVM;
3480 }
3481 }
3482
3483 /**
3484 * e1000_read_nvm_spt - NVM access for SPT
3485 * @hw: pointer to the HW structure
3486  * @offset: The offset (in words) of the word(s) to read.
3487 * @words: Size of data to read in words.
3488 * @data: pointer to the word(s) to read at offset.
3489 *
3490 * Reads a word(s) from the NVM
3491 **/
3492 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3493 u16 *data)
3494 {
3495 struct e1000_nvm_info *nvm = &hw->nvm;
3496 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3497 u32 act_offset;
3498 s32 ret_val = E1000_SUCCESS;
3499 u32 bank = 0;
3500 u32 dword = 0;
3501 u16 offset_to_read;
3502 u16 i;
3503
3504 DEBUGFUNC("e1000_read_nvm_spt");
3505
3506 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3507 (words == 0)) {
3508 DEBUGOUT("nvm parameter(s) out of bounds\n");
3509 ret_val = -E1000_ERR_NVM;
3510 goto out;
3511 }
3512
3513 nvm->ops.acquire(hw);
3514
3515 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3516 if (ret_val != E1000_SUCCESS) {
3517 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3518 bank = 0;
3519 }
3520
3521 act_offset = (bank) ? nvm->flash_bank_size : 0;
3522 act_offset += offset;
3523
3524 ret_val = E1000_SUCCESS;
3525
3526 for (i = 0; i < words; i += 2) {
3527 if (words - i == 1) {
3528 if (dev_spec->shadow_ram[offset+i].modified) {
3529 data[i] = dev_spec->shadow_ram[offset+i].value;
3530 } else {
3531 offset_to_read = act_offset + i -
3532 ((act_offset + i) % 2);
3533 ret_val =
3534 e1000_read_flash_dword_ich8lan(hw,
3535 offset_to_read,
3536 &dword);
3537 if (ret_val)
3538 break;
3539 if ((act_offset + i) % 2 == 0)
3540 data[i] = (u16)(dword & 0xFFFF);
3541 else
3542 data[i] = (u16)((dword >> 16) & 0xFFFF);
3543 }
3544 } else {
3545 offset_to_read = act_offset + i;
3546 if (!(dev_spec->shadow_ram[offset+i].modified) ||
3547 !(dev_spec->shadow_ram[offset+i+1].modified)) {
3548 ret_val =
3549 e1000_read_flash_dword_ich8lan(hw,
3550 offset_to_read,
3551 &dword);
3552 if (ret_val)
3553 break;
3554 }
3555 if (dev_spec->shadow_ram[offset+i].modified)
3556 data[i] = dev_spec->shadow_ram[offset+i].value;
3557 else
3558 data[i] = (u16) (dword & 0xFFFF);
3559 			if (dev_spec->shadow_ram[offset+i+1].modified)
3560 data[i+1] =
3561 dev_spec->shadow_ram[offset+i+1].value;
3562 else
3563 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3564 }
3565 }
3566
3567 nvm->ops.release(hw);
3568
3569 out:
3570 if (ret_val)
3571 DEBUGOUT1("NVM read error: %d\n", ret_val);
3572
3573 return ret_val;
3574 }
3575
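/* Illustrative sketch of the alignment done in the odd-word case above:
 * the word offset is rounded down to the containing dword before the
 * 32-bit flash read, and the high or low half is then selected.
 * Hypothetical helper, not part of the driver.
 */
#if 0
static u16 example_word_from_dword(u32 dword, u32 word_offset)
{
	/* even word offsets sit in the low 16 bits, odd ones in the high */
	return (word_offset % 2 == 0) ? (u16)(dword & 0xFFFF)
				      : (u16)((dword >> 16) & 0xFFFF);
}
#endif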
3576 /**
3577 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3578 * @hw: pointer to the HW structure
3579  * @offset: The offset (in words) of the word(s) to read.
3580 * @words: Size of data to read in words
3581 * @data: Pointer to the word(s) to read at offset.
3582 *
3583 * Reads a word(s) from the NVM using the flash access registers.
3584 **/
3585 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3586 u16 *data)
3587 {
3588 struct e1000_nvm_info *nvm = &hw->nvm;
3589 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3590 u32 act_offset;
3591 s32 ret_val = E1000_SUCCESS;
3592 u32 bank = 0;
3593 u16 i, word;
3594
3595 DEBUGFUNC("e1000_read_nvm_ich8lan");
3596
3597 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3598 (words == 0)) {
3599 DEBUGOUT("nvm parameter(s) out of bounds\n");
3600 ret_val = -E1000_ERR_NVM;
3601 goto out;
3602 }
3603
3604 nvm->ops.acquire(hw);
3605
3606 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3607 if (ret_val != E1000_SUCCESS) {
3608 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3609 bank = 0;
3610 }
3611
3612 act_offset = (bank) ? nvm->flash_bank_size : 0;
3613 act_offset += offset;
3614
3615 ret_val = E1000_SUCCESS;
3616 for (i = 0; i < words; i++) {
3617 if (dev_spec->shadow_ram[offset+i].modified) {
3618 data[i] = dev_spec->shadow_ram[offset+i].value;
3619 } else {
3620 ret_val = e1000_read_flash_word_ich8lan(hw,
3621 act_offset + i,
3622 &word);
3623 if (ret_val)
3624 break;
3625 data[i] = word;
3626 }
3627 }
3628
3629 nvm->ops.release(hw);
3630
3631 out:
3632 if (ret_val)
3633 DEBUGOUT1("NVM read error: %d\n", ret_val);
3634
3635 return ret_val;
3636 }
3637
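/* Hedged usage sketch: callers normally reach this routine through the
 * NVM ops table rather than calling it directly; the offset and length
 * below are made-up values.
 */
#if 0
	u16 buf[2];
	s32 err = hw->nvm.ops.read(hw, 0x10, 2, buf); /* words 0x10-0x11 */
#endif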
3638 /**
3639 * e1000_flash_cycle_init_ich8lan - Initialize flash
3640 * @hw: pointer to the HW structure
3641 *
3642 * This function does initial flash setup so that a new read/write/erase cycle
3643 * can be started.
3644 **/
3645 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3646 {
3647 union ich8_hws_flash_status hsfsts;
3648 s32 ret_val = -E1000_ERR_NVM;
3649
3650 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3651
3652 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3653
3654 /* Check if the flash descriptor is valid */
3655 if (!hsfsts.hsf_status.fldesvalid) {
3656 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3657 return -E1000_ERR_NVM;
3658 }
3659
3660 /* Clear FCERR and DAEL in hw status by writing 1 */
3661 hsfsts.hsf_status.flcerr = 1;
3662 hsfsts.hsf_status.dael = 1;
3663 if (hw->mac.type == e1000_pch_spt)
3664 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3665 hsfsts.regval & 0xFFFF);
3666 else
3667 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3668
3669 	/* Either we should have a hardware SPI cycle-in-progress
3670 	 * bit to check against in order to start a new cycle, or
3671 	 * the FDONE bit should be set to 1 by hardware after reset,
3672 	 * which can then be used as an indication of whether a
3673 	 * cycle is in progress or has been completed.
3674 	 */
3676
3677 if (!hsfsts.hsf_status.flcinprog) {
3678 /* There is no cycle running at present,
3679 * so we can start a cycle.
3680 * Begin by setting Flash Cycle Done.
3681 */
3682 hsfsts.hsf_status.flcdone = 1;
3683 if (hw->mac.type == e1000_pch_spt)
3684 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3685 hsfsts.regval & 0xFFFF);
3686 else
3687 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3688 hsfsts.regval);
3689 ret_val = E1000_SUCCESS;
3690 } else {
3691 s32 i;
3692
3693 		/* Otherwise poll for some time so the current
3694 * cycle has a chance to end before giving up.
3695 */
3696 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3697 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3698 ICH_FLASH_HSFSTS);
3699 if (!hsfsts.hsf_status.flcinprog) {
3700 ret_val = E1000_SUCCESS;
3701 break;
3702 }
3703 usec_delay(1);
3704 }
3705 if (ret_val == E1000_SUCCESS) {
3706 			/* Successfully waited for the previous cycle to
3707 			 * end; now set the Flash Cycle Done.
3708 			 */
3709 hsfsts.hsf_status.flcdone = 1;
3710 if (hw->mac.type == e1000_pch_spt)
3711 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3712 hsfsts.regval & 0xFFFF);
3713 else
3714 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3715 hsfsts.regval);
3716 } else {
3717 DEBUGOUT("Flash controller busy, cannot get access\n");
3718 }
3719 }
3720
3721 return ret_val;
3722 }
3723
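/* Distilled sketch of the write-1-to-clear idiom used above: reading the
 * status register, setting the sticky FCERR/DAEL error bits, and writing
 * the value back clears them (this restates the non-SPT path for
 * illustration only).
 */
#if 0
	union ich8_hws_flash_status sts;

	sts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
	sts.hsf_status.flcerr = 1;	/* writing 1 clears the bit */
	sts.hsf_status.dael = 1;
	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, sts.regval);
#endif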
3724 /**
3725 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3726 * @hw: pointer to the HW structure
3727 * @timeout: maximum time to wait for completion
3728 *
3729 * This function starts a flash cycle and waits for its completion.
3730 **/
3731 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3732 {
3733 union ich8_hws_flash_ctrl hsflctl;
3734 union ich8_hws_flash_status hsfsts;
3735 u32 i = 0;
3736
3737 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3738
3739 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3740 if (hw->mac.type == e1000_pch_spt)
3741 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3742 else
3743 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3744 hsflctl.hsf_ctrl.flcgo = 1;
3745
3746 if (hw->mac.type == e1000_pch_spt)
3747 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3748 hsflctl.regval << 16);
3749 else
3750 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3751
3752 /* wait till FDONE bit is set to 1 */
3753 do {
3754 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3755 if (hsfsts.hsf_status.flcdone)
3756 break;
3757 usec_delay(1);
3758 } while (i++ < timeout);
3759
3760 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3761 return E1000_SUCCESS;
3762
3763 return -E1000_ERR_NVM;
3764 }
3765
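/* The function above is an instance of the generic poll-with-timeout
 * pattern sketched below; the completion predicate is a placeholder,
 * not a driver API.
 */
#if 0
static s32 example_poll_done(int (*done)(struct e1000_hw *),
			     struct e1000_hw *hw, u32 timeout)
{
	u32 i = 0;

	do {
		if (done(hw))		/* hypothetical predicate */
			return E1000_SUCCESS;
		usec_delay(1);
	} while (i++ < timeout);

	return -E1000_ERR_NVM;
}
#endif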
3766 /**
3767 * e1000_read_flash_dword_ich8lan - Read dword from flash
3768 * @hw: pointer to the HW structure
3769 * @offset: offset to data location
3770 * @data: pointer to the location for storing the data
3771 *
3772 * Reads the flash dword at offset into data. Offset is converted
3773 * to bytes before read.
3774 **/
3775 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3776 u32 *data)
3777 {
3778 DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3779
3780 if (!data)
3781 return -E1000_ERR_NVM;
3782
3783 /* Must convert word offset into bytes. */
3784 offset <<= 1;
3785
3786 return e1000_read_flash_data32_ich8lan(hw, offset, data);
3787 }
3788
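/* Worked example of the word-to-byte conversion above: the signature word
 * 0x13 maps to byte offset 0x26 within the active bank.
 */
#if 0
	u32 byte_offset = E1000_ICH_NVM_SIG_WORD << 1;	/* 0x13 -> 0x26 */
#endif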
3789 /**
3790 * e1000_read_flash_word_ich8lan - Read word from flash
3791 * @hw: pointer to the HW structure
3792 * @offset: offset to data location
3793 * @data: pointer to the location for storing the data
3794 *
3795 * Reads the flash word at offset into data. Offset is converted
3796 * to bytes before read.
3797 **/
3798 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3799 u16 *data)
3800 {
3801 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3802
3803 if (!data)
3804 return -E1000_ERR_NVM;
3805
3806 /* Must convert offset into bytes. */
3807 offset <<= 1;
3808
3809 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3810 }
3811
3812 /**
3813 * e1000_read_flash_byte_ich8lan - Read byte from flash
3814 * @hw: pointer to the HW structure
3815 * @offset: The offset of the byte to read.
3816 * @data: Pointer to a byte to store the value read.
3817 *
3818 * Reads a single byte from the NVM using the flash access registers.
3819 **/
3820 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3821 u8 *data)
3822 {
3823 s32 ret_val;
3824 u16 word = 0;
3825
3826 	/* In SPT, only 32-bit access is supported,
3827 * so this function should not be called.
3828 */
3829 if (hw->mac.type == e1000_pch_spt)
3830 return -E1000_ERR_NVM;
3831 else
3832 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3833
3834 if (ret_val)
3835 return ret_val;
3836
3837 *data = (u8)word;
3838
3839 return E1000_SUCCESS;
3840 }
3841
3842 /**
3843 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3844 * @hw: pointer to the HW structure
3845 * @offset: The offset (in bytes) of the byte or word to read.
3846 * @size: Size of data to read, 1=byte 2=word
3847 * @data: Pointer to the word to store the value read.
3848 *
3849 * Reads a byte or word from the NVM using the flash access registers.
3850 **/
3851 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3852 u8 size, u16 *data)
3853 {
3854 union ich8_hws_flash_status hsfsts;
3855 union ich8_hws_flash_ctrl hsflctl;
3856 u32 flash_linear_addr;
3857 u32 flash_data = 0;
3858 s32 ret_val = -E1000_ERR_NVM;
3859 u8 count = 0;
3860
3861 DEBUGFUNC("e1000_read_flash_data_ich8lan");
3862
3863 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3864 return -E1000_ERR_NVM;
3865 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3866 hw->nvm.flash_base_addr);
3867
3868 do {
3869 usec_delay(1);
3870 /* Steps */
3871 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3872 if (ret_val != E1000_SUCCESS)
3873 break;
3874 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3875
3876 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3877 hsflctl.hsf_ctrl.fldbcount = size - 1;
3878 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3879 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3880 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3881
3882 ret_val = e1000_flash_cycle_ich8lan(hw,
3883 ICH_FLASH_READ_COMMAND_TIMEOUT);
3884
3885 		/* If FCERR is set to 1, clear it and try the whole
3886 		 * sequence a few more times; otherwise read in
3887 		 * (shift in) the Flash Data0 register, which returns
3888 		 * the data least significant byte first.
3889 		 */
3890 if (ret_val == E1000_SUCCESS) {
3891 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3892 if (size == 1)
3893 *data = (u8)(flash_data & 0x000000FF);
3894 else if (size == 2)
3895 *data = (u16)(flash_data & 0x0000FFFF);
3896 break;
3897 } else {
3898 /* If we've gotten here, then things are probably
3899 * completely hosed, but if the error condition is
3900 * detected, it won't hurt to give it another try...
3901 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3902 */
3903 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3904 ICH_FLASH_HSFSTS);
3905 if (hsfsts.hsf_status.flcerr) {
3906 /* Repeat for some time before giving up. */
3907 continue;
3908 } else if (!hsfsts.hsf_status.flcdone) {
3909 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3910 break;
3911 }
3912 }
3913 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3914
3915 return ret_val;
3916 }
3917
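/* Distilled shape of the retry loop above (illustrative only): retry the
 * flash cycle while FCERR is reported, but bail out immediately on a
 * timeout where FLCDONE never set. The helpers named here are
 * placeholders, not driver functions.
 */
#if 0
	u8 count = 0;
	s32 ret = -E1000_ERR_NVM;

	do {
		ret = example_try_cycle(hw);	/* hypothetical */
		if (ret == E1000_SUCCESS)
			break;
		if (!example_saw_fcerr(hw))	/* hypothetical */
			break;	/* timeout without FCERR: give up */
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
#endif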
3918 /**
3919 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3920 * @hw: pointer to the HW structure
3921 * @offset: The offset (in bytes) of the dword to read.
3922 * @data: Pointer to the dword to store the value read.
3923 *
3924  * Reads a dword from the NVM using the flash access registers.
3925 **/
3926 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3927 u32 *data)
3928 {
3929 union ich8_hws_flash_status hsfsts;
3930 union ich8_hws_flash_ctrl hsflctl;
3931 u32 flash_linear_addr;
3932 s32 ret_val = -E1000_ERR_NVM;
3933 u8 count = 0;
3934
3935 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3936
3937 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3938 hw->mac.type != e1000_pch_spt)
3939 return -E1000_ERR_NVM;
3940 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3941 hw->nvm.flash_base_addr);
3942
3943 do {
3944 usec_delay(1);
3945 /* Steps */
3946 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3947 if (ret_val != E1000_SUCCESS)
3948 break;
3949 		/* In SPT, this register is in LAN memory space, not flash.
3950 		 * Therefore, only 32-bit access is supported.
3951 		 */
3952 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3953
3954 		/* 11b corresponds to a 4-byte (dword) transfer size. */
3955 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3956 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3957 		/* In SPT, this register is in LAN memory space, not flash.
3958 		 * Therefore, only 32-bit access is supported.
3959 		 */
3960 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3961 (u32)hsflctl.regval << 16);
3962 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3963
3964 ret_val = e1000_flash_cycle_ich8lan(hw,
3965 ICH_FLASH_READ_COMMAND_TIMEOUT);
3966
3967 		/* If FCERR is set to 1, clear it and try the whole
3968 		 * sequence a few more times; otherwise read in
3969 		 * (shift in) the Flash Data0 register, which returns
3970 		 * the data least significant byte first.
3971 		 */
3972 if (ret_val == E1000_SUCCESS) {
3973 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3974 break;
3975 } else {
3976 /* If we've gotten here, then things are probably
3977 * completely hosed, but if the error condition is
3978 * detected, it won't hurt to give it another try...
3979 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3980 */
3981 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3982 ICH_FLASH_HSFSTS);
3983 if (hsfsts.hsf_status.flcerr) {
3984 /* Repeat for some time before giving up. */
3985 continue;
3986 } else if (!hsfsts.hsf_status.flcdone) {
3987 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3988 break;
3989 }
3990 }
3991 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3992
3993 return ret_val;
3994 }
3995
3996 /**
3997 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
3998 * @hw: pointer to the HW structure
3999  * @offset: The offset (in words) of the word(s) to write.
4000 * @words: Size of data to write in words
4001 * @data: Pointer to the word(s) to write at offset.
4002 *
4003  * Writes word(s) to the shadow RAM; the flash is updated when the NVM checksum is next updated.
4004 **/
4005 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4006 u16 *data)
4007 {
4008 struct e1000_nvm_info *nvm = &hw->nvm;
4009 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4010 u16 i;
4011
4012 DEBUGFUNC("e1000_write_nvm_ich8lan");
4013
4014 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4015 (words == 0)) {
4016 DEBUGOUT("nvm parameter(s) out of bounds\n");
4017 return -E1000_ERR_NVM;
4018 }
4019
4020 nvm->ops.acquire(hw);
4021
4022 for (i = 0; i < words; i++) {
4023 dev_spec->shadow_ram[offset+i].modified = TRUE;
4024 dev_spec->shadow_ram[offset+i].value = data[i];
4025 }
4026
4027 nvm->ops.release(hw);
4028
4029 return E1000_SUCCESS;
4030 }
4031
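/* Hedged usage sketch: this routine only stages words in the shadow RAM,
 * so a caller must follow it with a checksum update to commit the change
 * to flash. The offset and value below are made up.
 */
#if 0
	u16 val = 0x1234;

	hw->nvm.ops.write(hw, 0x20, 1, &val);	/* staged in shadow RAM */
	hw->nvm.ops.update(hw);			/* committed to flash */
#endif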
4032 /**
4033 * e1000_update_nvm_checksum_spt - Update the checksum for NVM
4034 * @hw: pointer to the HW structure
4035 *
4036 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4037 * which writes the checksum to the shadow ram. The changes in the shadow
4038 * ram are then committed to the EEPROM by processing each bank at a time
4039 * checking for the modified bit and writing only the pending changes.
4040 * After a successful commit, the shadow ram is cleared and is ready for
4041 * future writes.
4042 **/
4043 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4044 {
4045 struct e1000_nvm_info *nvm = &hw->nvm;
4046 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4047 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4048 s32 ret_val;
4049 u32 dword = 0;
4050
4051 DEBUGFUNC("e1000_update_nvm_checksum_spt");
4052
4053 ret_val = e1000_update_nvm_checksum_generic(hw);
4054 if (ret_val)
4055 goto out;
4056
4057 if (nvm->type != e1000_nvm_flash_sw)
4058 goto out;
4059
4060 nvm->ops.acquire(hw);
4061
4062 /* We're writing to the opposite bank so if we're on bank 1,
4063 * write to bank 0 etc. We also need to erase the segment that
4064 * is going to be written
4065 */
4066 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4067 if (ret_val != E1000_SUCCESS) {
4068 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4069 bank = 0;
4070 }
4071
4072 if (bank == 0) {
4073 new_bank_offset = nvm->flash_bank_size;
4074 old_bank_offset = 0;
4075 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4076 if (ret_val)
4077 goto release;
4078 } else {
4079 old_bank_offset = nvm->flash_bank_size;
4080 new_bank_offset = 0;
4081 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4082 if (ret_val)
4083 goto release;
4084 }
4085 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4086 /* Determine whether to write the value stored
4087 * in the other NVM bank or a modified value stored
4088 * in the shadow RAM
4089 */
4090 ret_val = e1000_read_flash_dword_ich8lan(hw,
4091 i + old_bank_offset,
4092 &dword);
4093
4094 if (dev_spec->shadow_ram[i].modified) {
4095 dword &= 0xffff0000;
4096 dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4097 }
4098 if (dev_spec->shadow_ram[i + 1].modified) {
4099 dword &= 0x0000ffff;
4100 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4101 << 16);
4102 }
4103 if (ret_val)
4104 break;
4105
4106 /* If the word is 0x13, then make sure the signature bits
4107 * (15:14) are 11b until the commit has completed.
4108 * This will allow us to write 10b which indicates the
4109 * signature is valid. We want to do this after the write
4110 * has completed so that we don't mark the segment valid
4111 * while the write is still in progress
4112 */
4113 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4114 dword |= E1000_ICH_NVM_SIG_MASK << 16;
4115
4119 		usec_delay(100);
4120 
4121 		/* Write the data to the new bank (offset in words). */
4122 		act_offset = i + new_bank_offset;
4123 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4124 dword);
4125 if (ret_val)
4126 break;
4127 }
4128
4129 /* Don't bother writing the segment valid bits if sector
4130 * programming failed.
4131 */
4132 if (ret_val) {
4133 DEBUGOUT("Flash commit failed.\n");
4134 goto release;
4135 }
4136
4137 	/* Finally, validate the new segment by setting bits 15:14
4138 	 * to 10b in word 0x13. This can be done without an
4139 	 * erase since these bits are 11b to start with and we
4140 	 * need only change bit 14 to 0b.
4141 	 */
4142 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4143
4144 	/* Offset is in words, but we read the dword containing word 0x13. */
4145 --act_offset;
4146 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4147
4148 if (ret_val)
4149 goto release;
4150
4151 dword &= 0xBFFFFFFF;
4152 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4153
4154 if (ret_val)
4155 goto release;
4156
4157 /* And invalidate the previously valid segment by setting
4158 * its signature word (0x13) high_byte to 0b. This can be
4159 * done without an erase because flash erase sets all bits
4160 * to 1's. We can write 1's to 0's without an erase
4161 */
4162 	/* Offset is in words, but we read the dword containing the
4163 	 * signature word (0x13).
4164 	 */
4165 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4166 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4167
4168 if (ret_val)
4169 goto release;
4170
4171 dword &= 0x00FFFFFF;
4172 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4173
4174 if (ret_val)
4175 goto release;
4176
4177 /* Great! Everything worked, we can now clear the cached entries. */
4178 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4179 dev_spec->shadow_ram[i].modified = FALSE;
4180 dev_spec->shadow_ram[i].value = 0xFFFF;
4181 }
4182
4183 release:
4184 nvm->ops.release(hw);
4185
4186 /* Reload the EEPROM, or else modifications will not appear
4187 * until after the next adapter reset.
4188 */
4189 if (!ret_val) {
4190 nvm->ops.reload(hw);
4191 msec_delay(10);
4192 }
4193
4194 out:
4195 if (ret_val)
4196 DEBUGOUT1("NVM update error: %d\n", ret_val);
4197
4198 return ret_val;
4199 }
4200
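/* Worked example of the signature arithmetic above (masks taken from the
 * code, layout illustrative): the dword at word offset 0x12 holds word
 * 0x12 in its low half and the signature word 0x13 in its high half, so
 * clearing bit 30 turns bits 15:14 of word 0x13 from 11b into 10b (valid),
 * and clearing bits 31:24 zeroes the old bank's signature byte.
 */
#if 0
	u32 d = 0xFFFFFFFF;	/* freshly written words 0x12/0x13 */

	d &= 0xBFFFFFFF;	/* new bank: word 0x13 bits 15:14 -> 10b */
	d &= 0x00FFFFFF;	/* old bank: signature high byte -> 0 */
#endif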
4201 /**
4202 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4203 * @hw: pointer to the HW structure
4204 *
4205 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4206 * which writes the checksum to the shadow ram. The changes in the shadow
4207 * ram are then committed to the EEPROM by processing each bank at a time
4208 * checking for the modified bit and writing only the pending changes.
4209 * After a successful commit, the shadow ram is cleared and is ready for
4210 * future writes.
4211 **/
4212 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4213 {
4214 struct e1000_nvm_info *nvm = &hw->nvm;
4215 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4216 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4217 s32 ret_val;
4218 u16 data = 0;
4219
4220 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4221
4222 ret_val = e1000_update_nvm_checksum_generic(hw);
4223 if (ret_val)
4224 goto out;
4225
4226 if (nvm->type != e1000_nvm_flash_sw)
4227 goto out;
4228
4229 nvm->ops.acquire(hw);
4230
4231 /* We're writing to the opposite bank so if we're on bank 1,
4232 * write to bank 0 etc. We also need to erase the segment that
4233 * is going to be written
4234 */
4235 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4236 if (ret_val != E1000_SUCCESS) {
4237 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4238 bank = 0;
4239 }
4240
4241 if (bank == 0) {
4242 new_bank_offset = nvm->flash_bank_size;
4243 old_bank_offset = 0;
4244 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4245 if (ret_val)
4246 goto release;
4247 } else {
4248 old_bank_offset = nvm->flash_bank_size;
4249 new_bank_offset = 0;
4250 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4251 if (ret_val)
4252 goto release;
4253 }
4254 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4255 if (dev_spec->shadow_ram[i].modified) {
4256 data = dev_spec->shadow_ram[i].value;
4257 } else {
4258 ret_val = e1000_read_flash_word_ich8lan(hw, i +
4259 old_bank_offset,
4260 &data);
4261 if (ret_val)
4262 break;
4263 }
4264 /* If the word is 0x13, then make sure the signature bits
4265 * (15:14) are 11b until the commit has completed.
4266 * This will allow us to write 10b which indicates the
4267 * signature is valid. We want to do this after the write
4268 * has completed so that we don't mark the segment valid
4269 * while the write is still in progress
4270 */
4271 if (i == E1000_ICH_NVM_SIG_WORD)
4272 data |= E1000_ICH_NVM_SIG_MASK;
4273
4274 /* Convert offset to bytes. */
4275 act_offset = (i + new_bank_offset) << 1;
4276
4277 usec_delay(100);
4278
4279 /* Write the bytes to the new bank. */
4280 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4281 act_offset,
4282 (u8)data);
4283 if (ret_val)
4284 break;
4285
4286 usec_delay(100);
4287 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4288 act_offset + 1,
4289 (u8)(data >> 8));
4290 if (ret_val)
4291 break;
4292 }
4293
4294 /* Don't bother writing the segment valid bits if sector
4295 * programming failed.
4296 */
4297 if (ret_val) {
4298 DEBUGOUT("Flash commit failed.\n");
4299 goto release;
4300 }
4301
4302 	/* Finally, validate the new segment by setting bits 15:14
4303 	 * to 10b in word 0x13. This can be done without an
4304 	 * erase since these bits are 11b to start with and we
4305 	 * need only change bit 14 to 0b.
4306 	 */
4307 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4308 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4309 if (ret_val)
4310 goto release;
4311
4312 data &= 0xBFFF;
4313 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4314 (u8)(data >> 8));
4315 if (ret_val)
4316 goto release;
4317
4318 /* And invalidate the previously valid segment by setting
4319 * its signature word (0x13) high_byte to 0b. This can be
4320 * done without an erase because flash erase sets all bits
4321 * to 1's. We can write 1's to 0's without an erase
4322 */
4323 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4324
4325 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4326
4327 if (ret_val)
4328 goto release;
4329
4330 /* Great! Everything worked, we can now clear the cached entries. */
4331 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4332 dev_spec->shadow_ram[i].modified = FALSE;
4333 dev_spec->shadow_ram[i].value = 0xFFFF;
4334 }
4335
4336 release:
4337 nvm->ops.release(hw);
4338
4339 /* Reload the EEPROM, or else modifications will not appear
4340 * until after the next adapter reset.
4341 */
4342 if (!ret_val) {
4343 nvm->ops.reload(hw);
4344 msec_delay(10);
4345 }
4346
4347 out:
4348 if (ret_val)
4349 DEBUGOUT1("NVM update error: %d\n", ret_val);
4350
4351 return ret_val;
4352 }
4353
4354 /**
4355 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4356 * @hw: pointer to the HW structure
4357 *
4358 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4359  * If the bit is 0, then the EEPROM had been modified, but the checksum was not
4360 * calculated, in which case we need to calculate the checksum and set bit 6.
4361 **/
4362 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4363 {
4364 s32 ret_val;
4365 u16 data;
4366 u16 word;
4367 u16 valid_csum_mask;
4368
4369 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4370
4371 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4372 * the checksum needs to be fixed. This bit is an indication that
4373 * the NVM was prepared by OEM software and did not calculate
4374 * the checksum...a likely scenario.
4375 */
4376 switch (hw->mac.type) {
4377 case e1000_pch_lpt:
4378 case e1000_pch_spt:
4379 word = NVM_COMPAT;
4380 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4381 break;
4382 default:
4383 word = NVM_FUTURE_INIT_WORD1;
4384 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4385 break;
4386 }
4387
4388 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4389 if (ret_val)
4390 return ret_val;
4391
4392 if (!(data & valid_csum_mask)) {
4393 data |= valid_csum_mask;
4394 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4395 if (ret_val)
4396 return ret_val;
4397 ret_val = hw->nvm.ops.update(hw);
4398 if (ret_val)
4399 return ret_val;
4400 }
4401
4402 return e1000_validate_nvm_checksum_generic(hw);
4403 }
4404
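/* Sketch of the generic validation this function falls through to,
 * assuming the usual e1000 convention that words 0x00-0x3F must sum to
 * NVM_SUM (0xBABA); restated here for illustration only.
 */
#if 0
	u16 sum = 0, w, i;

	for (i = 0; i < NVM_CHECKSUM_REG + 1; i++) {
		if (hw->nvm.ops.read(hw, i, 1, &w))
			break;
		sum += w;
	}
	/* the image is valid when sum == NVM_SUM */
#endif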
4405 /**
4406 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4407 * @hw: pointer to the HW structure
4408  * @offset: The offset (in bytes) of the byte/word to write.
4409  * @size: Size of data to write, 1=byte 2=word
4410 * @data: The byte(s) to write to the NVM.
4411 *
4412 * Writes one/two bytes to the NVM using the flash access registers.
4413 **/
4414 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4415 u8 size, u16 data)
4416 {
4417 union ich8_hws_flash_status hsfsts;
4418 union ich8_hws_flash_ctrl hsflctl;
4419 u32 flash_linear_addr;
4420 u32 flash_data = 0;
4421 s32 ret_val;
4422 u8 count = 0;
4423
4424 DEBUGFUNC("e1000_write_ich8_data");
4425
4426 if (hw->mac.type == e1000_pch_spt) {
4427 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4428 return -E1000_ERR_NVM;
4429 } else {
4430 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4431 return -E1000_ERR_NVM;
4432 }
4433
4434 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4435 hw->nvm.flash_base_addr);
4436
4437 do {
4438 usec_delay(1);
4439 /* Steps */
4440 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4441 if (ret_val != E1000_SUCCESS)
4442 break;
4443 		/* In SPT, this register is in LAN memory space, not
4444 		 * flash. Therefore, only 32-bit access is supported.
4445 		 */
4446 if (hw->mac.type == e1000_pch_spt)
4447 hsflctl.regval =
4448 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4449 else
4450 hsflctl.regval =
4451 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4452
4453 		/* fldbcount = size - 1: 0b/1b/11b for 1-, 2-, or 4-byte access. */
4454 hsflctl.hsf_ctrl.fldbcount = size - 1;
4455 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4456 		/* In SPT, this register is in LAN memory space,
4457 		 * not flash. Therefore, only 32-bit access is
4458 		 * supported.
4459 		 */
4460 if (hw->mac.type == e1000_pch_spt)
4461 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4462 hsflctl.regval << 16);
4463 else
4464 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4465 hsflctl.regval);
4466
4467 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4468
4469 if (size == 1)
4470 flash_data = (u32)data & 0x00FF;
4471 else
4472 flash_data = (u32)data;
4473
4474 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4475
4476 		/* If FCERR is set to 1, clear it and try the whole
4477 		 * sequence a few more times; else we are done.
4478 		 */
4479 ret_val =
4480 e1000_flash_cycle_ich8lan(hw,
4481 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4482 if (ret_val == E1000_SUCCESS)
4483 break;
4484
4485 /* If we're here, then things are most likely
4486 * completely hosed, but if the error condition
4487 * is detected, it won't hurt to give it another
4488 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4489 */
4490 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4491 if (hsfsts.hsf_status.flcerr)
4492 /* Repeat for some time before giving up. */
4493 continue;
4494 if (!hsfsts.hsf_status.flcdone) {
4495 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4496 break;
4497 }
4498 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4499
4500 return ret_val;
4501 }
4502
4503 /**
4504 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4505 * @hw: pointer to the HW structure
4506  * @offset: The offset (in bytes) of the dword to write.
4507 * @data: The 4 bytes to write to the NVM.
4508 *
4509  * Writes a dword (4 bytes) to the NVM using the flash access registers.
4510 **/
4511 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4512 u32 data)
4513 {
4514 union ich8_hws_flash_status hsfsts;
4515 union ich8_hws_flash_ctrl hsflctl;
4516 u32 flash_linear_addr;
4517 s32 ret_val;
4518 u8 count = 0;
4519
4520 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4521
4522 if (hw->mac.type == e1000_pch_spt) {
4523 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4524 return -E1000_ERR_NVM;
4525 }
4526 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4527 hw->nvm.flash_base_addr);
4528 do {
4529 usec_delay(1);
4530 /* Steps */
4531 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4532 if (ret_val != E1000_SUCCESS)
4533 break;
4534
4535 		/* In SPT, this register is in LAN memory space, not
4536 		 * flash. Therefore, only 32-bit access is supported.
4537 		 */
4538 if (hw->mac.type == e1000_pch_spt)
4539 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4540 ICH_FLASH_HSFSTS)
4541 >> 16;
4542 else
4543 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4544 ICH_FLASH_HSFCTL);
4545
4546 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4547 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4548
4549 		/* In SPT, this register is in LAN memory space,
4550 		 * not flash. Therefore, only 32-bit access is
4551 		 * supported.
4552 		 */
4553 if (hw->mac.type == e1000_pch_spt)
4554 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4555 hsflctl.regval << 16);
4556 else
4557 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4558 hsflctl.regval);
4559
4560 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4561
4562 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4563
4564 		/* If FCERR is set to 1, clear it and try the whole
4565 		 * sequence a few more times; else we are done.
4566 		 */
4567 ret_val = e1000_flash_cycle_ich8lan(hw,
4568 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4569
4570 if (ret_val == E1000_SUCCESS)
4571 break;
4572
4573 /* If we're here, then things are most likely
4574 * completely hosed, but if the error condition
4575 * is detected, it won't hurt to give it another
4576 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4577 */
4578 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4579
4580 if (hsfsts.hsf_status.flcerr)
4581 /* Repeat for some time before giving up. */
4582 continue;
4583 if (!hsfsts.hsf_status.flcdone) {
4584 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4585 break;
4586 }
4587 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4588
4589 return ret_val;
4590 }
4591
4592 /**
4593 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4594 * @hw: pointer to the HW structure
4595  * @offset: The offset of the byte to write.
4596 * @data: The byte to write to the NVM.
4597 *
4598 * Writes a single byte to the NVM using the flash access registers.
4599 **/
4600 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4601 u8 data)
4602 {
4603 u16 word = (u16)data;
4604
4605 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4606
4607 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4608 }
4609
4610 /**
4611 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4612 * @hw: pointer to the HW structure
4613  * @offset: The offset (in words) of the dword to write.
4614 * @dword: The dword to write to the NVM.
4615 *
4616 * Writes a single dword to the NVM using the flash access registers.
4617 * Goes through a retry algorithm before giving up.
4618 **/
4619 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4620 u32 offset, u32 dword)
4621 {
4622 s32 ret_val;
4623 u16 program_retries;
4624
4625 DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4626
4627 /* Must convert word offset into bytes. */
4628 offset <<= 1;
4629
4630 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4631
4632 if (!ret_val)
4633 return ret_val;
4634 for (program_retries = 0; program_retries < 100; program_retries++) {
4635 		DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4636 usec_delay(100);
4637 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4638 if (ret_val == E1000_SUCCESS)
4639 break;
4640 }
4641 if (program_retries == 100)
4642 return -E1000_ERR_NVM;
4643
4644 return E1000_SUCCESS;
4645 }
4646
4647 /**
4648 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4649 * @hw: pointer to the HW structure
4650 * @offset: The offset of the byte to write.
4651 * @byte: The byte to write to the NVM.
4652 *
4653 * Writes a single byte to the NVM using the flash access registers.
4654 * Goes through a retry algorithm before giving up.
4655 **/
4656 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4657 u32 offset, u8 byte)
4658 {
4659 s32 ret_val;
4660 u16 program_retries;
4661
4662 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4663
4664 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4665 if (!ret_val)
4666 return ret_val;
4667
4668 for (program_retries = 0; program_retries < 100; program_retries++) {
4669 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4670 usec_delay(100);
4671 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4672 if (ret_val == E1000_SUCCESS)
4673 break;
4674 }
4675 if (program_retries == 100)
4676 return -E1000_ERR_NVM;
4677
4678 return E1000_SUCCESS;
4679 }
4680
4681 /**
4682 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4683 * @hw: pointer to the HW structure
4684 * @bank: 0 for first bank, 1 for second bank, etc.
4685 *
4686  * Erases the bank specified. Each bank is a 4k block. Banks are 0-based;
4687  * bank N begins at byte address 4096 * N + flash_reg_addr.
4688 **/
4689 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4690 {
4691 struct e1000_nvm_info *nvm = &hw->nvm;
4692 union ich8_hws_flash_status hsfsts;
4693 union ich8_hws_flash_ctrl hsflctl;
4694 u32 flash_linear_addr;
4695 /* bank size is in 16bit words - adjust to bytes */
4696 u32 flash_bank_size = nvm->flash_bank_size * 2;
4697 s32 ret_val;
4698 s32 count = 0;
4699 s32 j, iteration, sector_size;
4700
4701 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4702
4703 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4704
4705 /* Determine HW Sector size: Read BERASE bits of hw flash status
4706 * register
4707 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4708 * consecutive sectors. The start index for the nth Hw sector
4709 * can be calculated as = bank * 4096 + n * 256
4710 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4711 * The start index for the nth Hw sector can be calculated
4712 * as = bank * 4096
4713 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4714 * (ich9 only, otherwise error condition)
4715 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4716 */
4717 switch (hsfsts.hsf_status.berasesz) {
4718 case 0:
4719 /* Hw sector size 256 */
4720 sector_size = ICH_FLASH_SEG_SIZE_256;
4721 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4722 break;
4723 case 1:
4724 sector_size = ICH_FLASH_SEG_SIZE_4K;
4725 iteration = 1;
4726 break;
4727 case 2:
4728 sector_size = ICH_FLASH_SEG_SIZE_8K;
4729 iteration = 1;
4730 break;
4731 case 3:
4732 sector_size = ICH_FLASH_SEG_SIZE_64K;
4733 iteration = 1;
4734 break;
4735 default:
4736 return -E1000_ERR_NVM;
4737 }
4738
4739 /* Start with the base address, then add the sector offset. */
4740 flash_linear_addr = hw->nvm.flash_base_addr;
4741 flash_linear_addr += (bank) ? flash_bank_size : 0;
4742
4743 for (j = 0; j < iteration; j++) {
4744 do {
4745 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4746
4747 /* Steps */
4748 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4749 if (ret_val)
4750 return ret_val;
4751
4752 /* Write a value 11 (block Erase) in Flash
4753 * Cycle field in hw flash control
4754 */
4755 if (hw->mac.type == e1000_pch_spt)
4756 hsflctl.regval =
4757 E1000_READ_FLASH_REG(hw,
4758 ICH_FLASH_HSFSTS)>>16;
4759 else
4760 hsflctl.regval =
4761 E1000_READ_FLASH_REG16(hw,
4762 ICH_FLASH_HSFCTL);
4763
4764 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4765 if (hw->mac.type == e1000_pch_spt)
4766 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4767 hsflctl.regval << 16);
4768 else
4769 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4770 hsflctl.regval);
4771
4772 /* Write the last 24 bits of an index within the
4773 * block into Flash Linear address field in Flash
4774 * Address.
4775 */
4776 flash_linear_addr += (j * sector_size);
4777 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4778 flash_linear_addr);
4779
4780 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4781 if (ret_val == E1000_SUCCESS)
4782 break;
4783
4784 /* Check if FCERR is set to 1. If 1,
4785 * clear it and try the whole sequence
4786 * a few more times else Done
4787 */
4788 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4789 ICH_FLASH_HSFSTS);
4790 if (hsfsts.hsf_status.flcerr)
4791 /* repeat for some time before giving up */
4792 continue;
4793 else if (!hsfsts.hsf_status.flcdone)
4794 return ret_val;
4795 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4796 }
4797
4798 return E1000_SUCCESS;
4799 }
4800
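/* Illustrative decode of the BERASE cases handled above for a 4 KB bank
 * (flash_bank_size of 2048 words, i.e. 4096 bytes):
 *   berasesz 00b: 256-byte sectors -> 4096 / 256 = 16 erase cycles
 *   berasesz 01b: 4 KB sectors     -> 1 erase cycle covers the bank
 */
#if 0
	s32 iteration = 4096 / ICH_FLASH_SEG_SIZE_256;	/* == 16 */
#endif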
4801 /**
4802 * e1000_valid_led_default_ich8lan - Set the default LED settings
4803 * @hw: pointer to the HW structure
4804 * @data: Pointer to the LED settings
4805 *
4806  * Reads the LED default settings from the NVM into data. If the NVM LED
4807  * setting is all 0's or F's, set the LED default to a valid LED default
4808  * setting.
4809 **/
4810 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4811 {
4812 s32 ret_val;
4813
4814 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4815
4816 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4817 if (ret_val) {
4818 DEBUGOUT("NVM Read Error\n");
4819 return ret_val;
4820 }
4821
4822 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4823 *data = ID_LED_DEFAULT_ICH8LAN;
4824
4825 return E1000_SUCCESS;
4826 }
4827
4828 /**
4829 * e1000_id_led_init_pchlan - store LED configurations
4830 * @hw: pointer to the HW structure
4831 *
4832 * PCH does not control LEDs via the LEDCTL register, rather it uses
4833 * the PHY LED configuration register.
4834 *
4835 * PCH also does not have an "always on" or "always off" mode which
4836 * complicates the ID feature. Instead of using the "on" mode to indicate
4837 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4838 * use "link_up" mode. The LEDs will still ID on request if there is no
4839 * link based on logic in e1000_led_[on|off]_pchlan().
4840 **/
4841 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4842 {
4843 struct e1000_mac_info *mac = &hw->mac;
4844 s32 ret_val;
4845 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4846 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4847 u16 data, i, temp, shift;
4848
4849 DEBUGFUNC("e1000_id_led_init_pchlan");
4850
4851 /* Get default ID LED modes */
4852 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4853 if (ret_val)
4854 return ret_val;
4855
4856 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4857 mac->ledctl_mode1 = mac->ledctl_default;
4858 mac->ledctl_mode2 = mac->ledctl_default;
4859
4860 for (i = 0; i < 4; i++) {
4861 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4862 shift = (i * 5);
4863 switch (temp) {
4864 case ID_LED_ON1_DEF2:
4865 case ID_LED_ON1_ON2:
4866 case ID_LED_ON1_OFF2:
4867 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4868 mac->ledctl_mode1 |= (ledctl_on << shift);
4869 break;
4870 case ID_LED_OFF1_DEF2:
4871 case ID_LED_OFF1_ON2:
4872 case ID_LED_OFF1_OFF2:
4873 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4874 mac->ledctl_mode1 |= (ledctl_off << shift);
4875 break;
4876 default:
4877 /* Do nothing */
4878 break;
4879 }
4880 switch (temp) {
4881 case ID_LED_DEF1_ON2:
4882 case ID_LED_ON1_ON2:
4883 case ID_LED_OFF1_ON2:
4884 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4885 mac->ledctl_mode2 |= (ledctl_on << shift);
4886 break;
4887 case ID_LED_DEF1_OFF2:
4888 case ID_LED_ON1_OFF2:
4889 case ID_LED_OFF1_OFF2:
4890 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4891 mac->ledctl_mode2 |= (ledctl_off << shift);
4892 break;
4893 default:
4894 /* Do nothing */
4895 break;
4896 }
4897 }
4898
4899 return E1000_SUCCESS;
4900 }
4901
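/* Worked example of the field packing above (illustrative): the NVM word
 * carries one 4-bit mode nibble per LED, while each PHY LED config field
 * is 5 bits wide, hence the (i << 2) extract shift and the i * 5 insert
 * shift. For LED 2, say:
 */
#if 0
	u16 nibble = (data >> (2 << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
	u16 shift = 2 * 5;	/* bit position of LED 2's PHY LED field */
#endif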
4902 /**
4903 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4904 * @hw: pointer to the HW structure
4905 *
4906  * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4907  * register, so the bus width is hard coded.
4908 **/
4909 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4910 {
4911 struct e1000_bus_info *bus = &hw->bus;
4912 s32 ret_val;
4913
4914 DEBUGFUNC("e1000_get_bus_info_ich8lan");
4915
4916 ret_val = e1000_get_bus_info_pcie_generic(hw);
4917
4918 /* ICH devices are "PCI Express"-ish. They have
4919 * a configuration space, but do not contain
4920 * PCI Express Capability registers, so bus width
4921 * must be hardcoded.
4922 */
4923 if (bus->width == e1000_bus_width_unknown)
4924 bus->width = e1000_bus_width_pcie_x1;
4925
4926 return ret_val;
4927 }
4928
4929 /**
4930 * e1000_reset_hw_ich8lan - Reset the hardware
4931 * @hw: pointer to the HW structure
4932 *
4933 * Does a full reset of the hardware which includes a reset of the PHY and
4934 * MAC.
4935 **/
4936 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4937 {
4938 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4939 u16 kum_cfg;
4940 u32 ctrl, reg;
4941 s32 ret_val;
4942
4943 DEBUGFUNC("e1000_reset_hw_ich8lan");
4944
4945 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4946 * on the last TLP read/write transaction when MAC is reset.
4947 */
4948 ret_val = e1000_disable_pcie_master_generic(hw);
4949 if (ret_val)
4950 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4951
4952 DEBUGOUT("Masking off all interrupts\n");
4953 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4954
4955 /* Disable the Transmit and Receive units. Then delay to allow
4956 * any pending transactions to complete before we hit the MAC
4957 * with the global reset.
4958 */
4959 E1000_WRITE_REG(hw, E1000_RCTL, 0);
4960 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4961 E1000_WRITE_FLUSH(hw);
4962
4963 msec_delay(10);
4964
4965 /* Workaround for ICH8 bit corruption issue in FIFO memory */
4966 if (hw->mac.type == e1000_ich8lan) {
4967 /* Set Tx and Rx buffer allocation to 8k apiece. */
4968 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4969 /* Set Packet Buffer Size to 16k. */
4970 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4971 }
4972
4973 if (hw->mac.type == e1000_pchlan) {
4974 		/* Save the NVM K1 bit setting */
4975 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4976 if (ret_val)
4977 return ret_val;
4978
4979 if (kum_cfg & E1000_NVM_K1_ENABLE)
4980 dev_spec->nvm_k1_enabled = TRUE;
4981 else
4982 dev_spec->nvm_k1_enabled = FALSE;
4983 }
4984
4985 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4986
4987 if (!hw->phy.ops.check_reset_block(hw)) {
4988 /* Full-chip reset requires MAC and PHY reset at the same
4989 * time to make sure the interface between MAC and the
4990 * external PHY is reset.
4991 */
4992 ctrl |= E1000_CTRL_PHY_RST;
4993
4994 /* Gate automatic PHY configuration by hardware on
4995 * non-managed 82579
4996 */
4997 if ((hw->mac.type == e1000_pch2lan) &&
4998 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4999 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
5000 }
5001 ret_val = e1000_acquire_swflag_ich8lan(hw);
5002 DEBUGOUT("Issuing a global reset to ich8lan\n");
5003 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5004 /* cannot issue a flush here because it hangs the hardware */
5005 msec_delay(20);
5006
5007 /* Set Phy Config Counter to 50msec */
5008 if (hw->mac.type == e1000_pch2lan) {
5009 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5010 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5011 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5012 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5013 }
5014
5015 if (!ret_val)
5016 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
5017
5018 if (ctrl & E1000_CTRL_PHY_RST) {
5019 ret_val = hw->phy.ops.get_cfg_done(hw);
5020 if (ret_val)
5021 return ret_val;
5022
5023 ret_val = e1000_post_phy_reset_ich8lan(hw);
5024 if (ret_val)
5025 return ret_val;
5026 }
5027
5028 /* For PCH, this write will make sure that any noise
5029 * will be detected as a CRC error and be dropped rather than show up
5030 * as a bad packet to the DMA engine.
5031 */
5032 if (hw->mac.type == e1000_pchlan)
5033 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5034
5035 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5036 E1000_READ_REG(hw, E1000_ICR);
5037
5038 reg = E1000_READ_REG(hw, E1000_KABGTXD);
5039 reg |= E1000_KABGTXD_BGSQLBIAS;
5040 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5041
5042 return E1000_SUCCESS;
5043 }
5044
5045 /**
5046 * e1000_init_hw_ich8lan - Initialize the hardware
5047 * @hw: pointer to the HW structure
5048 *
5049 * Prepares the hardware for transmit and receive by doing the following:
5050 * - initialize hardware bits
5051 * - initialize LED identification
5052 * - setup receive address registers
5053 * - setup flow control
5054 * - setup transmit descriptors
5055 * - clear statistics
5056 **/
5057 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5058 {
5059 struct e1000_mac_info *mac = &hw->mac;
5060 u32 ctrl_ext, txdctl, snoop;
5061 s32 ret_val;
5062 u16 i;
5063
5064 DEBUGFUNC("e1000_init_hw_ich8lan");
5065
5066 e1000_initialize_hw_bits_ich8lan(hw);
5067
5068 /* Initialize identification LED */
5069 ret_val = mac->ops.id_led_init(hw);
5070 /* An error is not fatal and we should not stop init due to this */
5071 if (ret_val)
5072 DEBUGOUT("Error initializing identification LED\n");
5073
5074 /* Setup the receive address. */
5075 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5076
5077 /* Zero out the Multicast HASH table */
5078 DEBUGOUT("Zeroing the MTA\n");
5079 for (i = 0; i < mac->mta_reg_count; i++)
5080 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5081
5082 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
5083 * the ME. Disable wakeup by clearing the host wakeup bit.
5084 * Reset the phy after disabling host wakeup to reset the Rx buffer.
5085 */
5086 if (hw->phy.type == e1000_phy_82578) {
5087 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5088 i &= ~BM_WUC_HOST_WU_BIT;
5089 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5090 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5091 if (ret_val)
5092 return ret_val;
5093 }
5094
5095 /* Setup link and flow control */
5096 ret_val = mac->ops.setup_link(hw);
5097
5098 /* Set the transmit descriptor write-back policy for both queues */
5099 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5100 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5101 E1000_TXDCTL_FULL_TX_DESC_WB);
5102 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5103 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5104 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5105 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5106 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5107 E1000_TXDCTL_FULL_TX_DESC_WB);
5108 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5109 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5110 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5111
5112 /* ICH8 has opposite polarity of no_snoop bits.
5113 * By default, we should use snoop behavior.
5114 */
5115 if (mac->type == e1000_ich8lan)
5116 snoop = PCIE_ICH8_SNOOP_ALL;
5117 else
5118 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5119 e1000_set_pcie_no_snoop_generic(hw, snoop);
5120
5121 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5122 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5123 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5124
5125 /* Clear all of the statistics registers (clear on read). It is
5126 * important that we do this after we have tried to establish link
5127 * because the symbol error count will increment wildly if there
5128 * is no link.
5129 */
5130 e1000_clear_hw_cntrs_ich8lan(hw);
5131
5132 return ret_val;
5133 }
5134
5135 /**
5136 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5137 * @hw: pointer to the HW structure
5138 *
5139  * Sets/clears the hardware bits necessary for correctly setting up the
5140  * hardware for transmit and receive.
5141 **/
5142 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5143 {
5144 u32 reg;
5145
5146 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5147
5148 /* Extended Device Control */
5149 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5150 reg |= (1 << 22);
5151 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5152 if (hw->mac.type >= e1000_pchlan)
5153 reg |= E1000_CTRL_EXT_PHYPDEN;
5154 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5155
5156 /* Transmit Descriptor Control 0 */
5157 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5158 reg |= (1 << 22);
5159 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5160
5161 /* Transmit Descriptor Control 1 */
5162 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5163 reg |= (1 << 22);
5164 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5165
5166 /* Transmit Arbitration Control 0 */
5167 reg = E1000_READ_REG(hw, E1000_TARC(0));
5168 if (hw->mac.type == e1000_ich8lan)
5169 reg |= (1 << 28) | (1 << 29);
5170 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5171 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5172
5173 /* Transmit Arbitration Control 1 */
5174 reg = E1000_READ_REG(hw, E1000_TARC(1));
5175 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5176 reg &= ~(1 << 28);
5177 else
5178 reg |= (1 << 28);
5179 reg |= (1 << 24) | (1 << 26) | (1 << 30);
5180 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5181
5182 /* Device Status */
5183 if (hw->mac.type == e1000_ich8lan) {
5184 reg = E1000_READ_REG(hw, E1000_STATUS);
5185 reg &= ~(1UL << 31);
5186 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5187 }
5188
5189 	/* Work around a descriptor data corruption issue during NFSv2 UDP
5190 	 * traffic by simply disabling the NFS filtering capability.
5191 	 */
5192 reg = E1000_READ_REG(hw, E1000_RFCTL);
5193 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5194
5195 /* Disable IPv6 extension header parsing because some malformed
5196 * IPv6 headers can hang the Rx.
5197 */
5198 if (hw->mac.type == e1000_ich8lan)
5199 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5200 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5201
5202 /* Enable ECC on Lynxpoint */
5203 if ((hw->mac.type == e1000_pch_lpt) ||
5204 (hw->mac.type == e1000_pch_spt)) {
5205 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5206 reg |= E1000_PBECCSTS_ECC_ENABLE;
5207 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5208
5209 reg = E1000_READ_REG(hw, E1000_CTRL);
5210 reg |= E1000_CTRL_MEHE;
5211 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5212 }
5213
5214 return;
5215 }
5216
5217 /**
5218 * e1000_setup_link_ich8lan - Setup flow control and link settings
5219 * @hw: pointer to the HW structure
5220 *
5221 * Determines which flow control settings to use, then configures flow
5222 * control. Calls the appropriate media-specific link configuration
5223 * function. Assuming the adapter has a valid link partner, a valid link
5224 * should be established. Assumes the hardware has previously been reset
5225 * and the transmitter and receiver are not enabled.
5226 **/
5227 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5228 {
5229 s32 ret_val;
5230
5231 DEBUGFUNC("e1000_setup_link_ich8lan");
5232
5233 if (hw->phy.ops.check_reset_block(hw))
5234 return E1000_SUCCESS;
5235
5236 /* ICH parts do not have a word in the NVM to determine
5237 * the default flow control setting, so we explicitly
5238 * set it to full.
5239 */
5240 if (hw->fc.requested_mode == e1000_fc_default)
5241 hw->fc.requested_mode = e1000_fc_full;
5242
5243 /* Save off the requested flow control mode for use later. Depending
5244 * on the link partner's capabilities, we may or may not use this mode.
5245 */
5246 hw->fc.current_mode = hw->fc.requested_mode;
5247
5248 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5249 hw->fc.current_mode);
5250
5251 /* Continue to configure the copper link. */
5252 ret_val = hw->mac.ops.setup_physical_interface(hw);
5253 if (ret_val)
5254 return ret_val;
5255
5256 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5257 if ((hw->phy.type == e1000_phy_82578) ||
5258 (hw->phy.type == e1000_phy_82579) ||
5259 (hw->phy.type == e1000_phy_i217) ||
5260 (hw->phy.type == e1000_phy_82577)) {
5261 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5262
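		/* Mirror the pause time into the PHY (BM port-control page,
		 * register 27), presumably so PHY-originated pause frames
		 * use the same timer value (assumption; the register is not
		 * named in the shared-code headers).
		 */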
		ret_val = hw->phy.ops.write_reg(hw,
						PHY_REG(BM_PORT_CTRL_PAGE, 27),
						hw->fc.pause_time);
		if (ret_val)
			return ret_val;
	}

	return e1000_set_fc_watermarks_generic(hw);
}

/**
 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 * @hw: pointer to the HW structure
 *
 * Configures the Kumeran interface to the PHY to wait the appropriate time
 * when polling the PHY, then calls the generic setup_copper_link to finish
 * configuring the copper link.
 **/
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the MAC to wait the maximum time between each iteration
	 * and increase the max iterations when polling the PHY;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
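	/* Raise the low six bits of the Kumeran inband control parameter;
	 * the field layout is not publicly documented, so the value is kept
	 * exactly as in Intel's reference code.
	 */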
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

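		/* hw->phy.mdix encoding (shared-code convention):
		 * 1 = force MDI, 2 = force MDI-X, 0/other = auto-MDIX.
		 */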
		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}

/**
 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
 * @hw: pointer to the HW structure
 *
 * Calls the PHY-specific link setup function and then calls the
 * generic setup_copper_link to finish configuring the link for
 * Lynxpoint PCH devices.
 **/
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	ret_val = e1000_copper_link_setup_82577(hw);
	if (ret_val)
		return ret_val;

	return e1000_setup_copper_link_generic(hw);
}

/**
 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 * @hw: pointer to the HW structure
 * @speed: pointer to store current link speed
 * @duplex: pointer to store the current link duplex
 *
 * Calls the generic get_speed_and_duplex to retrieve the current link
 * information and then calls the Kumeran lock loss workaround for links at
 * gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_ich8lan");

	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
	if (ret_val)
		return ret_val;

	if ((hw->mac.type == e1000_ich8lan) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (*speed == SPEED_1000)) {
		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
	}

	return ret_val;
}

/**
 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 * @hw: pointer to the HW structure
 *
 * Work-around for 82566 Kumeran PCS lock loss:
 * On link status change (i.e. PCI reset, speed change) and link is up and
 * speed is gigabit-
 * 0) if workaround is optionally disabled do nothing
 * 1) wait 1ms for Kumeran link to come up
 * 2) check Kumeran Diagnostic register PCS lock loss bit
 * 3) if not set the link is locked (all is good), otherwise...
 * 4) reset the PHY
 * 5) repeat up to 10 times
 * Note: this is only called for IGP3 copper when speed is 1 Gb/s.
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return E1000_SUCCESS;

	/* Make sure link is up before proceeding. If not, just return.
	 * Attempting this while the link is negotiating fouled up link
	 * stability.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return E1000_SUCCESS;

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return E1000_SUCCESS;

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* Disable GigE link negotiation */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/* Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}

/**
 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 * @hw: pointer to the HW structure
 * @state: boolean value used to set the current Kumeran workaround state
 *
 * If ICH8, set the current Kumeran workaround state (enabled = TRUE,
 * disabled = FALSE).
 **/
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						 bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");

	if (hw->mac.type != e1000_ich8lan) {
		DEBUGOUT("Workaround applies to ICH8 only.\n");
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;

	return;
}
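
/* Illustrative usage (not part of the upstream shared code): a caller
 * wanting to suppress the PHY-reset loop above would do, e.g.,
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, FALSE);
 *
 * and pass TRUE to re-enable the workaround.
 */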

/**
 * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 * @hw: pointer to the HW structure
 *
 * Workaround for 82566 power-down on D3 entry:
 * 1) disable gigabit link
 * 2) write VR power-down enable
 * 3) read it back
 * Continue if successful, else issue LCD reset and repeat
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}

/**
 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 * @hw: pointer to the HW structure
 *
 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
 * LPLU, Gig disable, MDIC PHY reset):
 * 1) Set Kumeran Near-end loopback
 * 2) Clear Kumeran Near-end loopback
 * Should only be called for ICH8[m] devices with any 1G PHY.
 **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type == e1000_phy_ife))
		return;

	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		return;
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				     reg_data);
}

/**
 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 * @hw: pointer to the HW structure
 *
 * During the S0 to Sx transition, it is possible the link remains at gig
 * instead of negotiating to a lower speed. Before going to Sx, set
 * 'Gig Disable' to force link speed negotiation to a lower speed based on
 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 * needs to be written.
 * Parts that support (and are linked to a partner which supports) EEE in
 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 * than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
		    (hw->mac.type == e1000_pch_spt)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good. LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, FALSE);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
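		/* Reprogram the PHY's SMBus address: in Sx the MAC/PHY
		 * interconnect can drop to SMBus mode, and the address is
		 * lost across the PHY reset above (assumption based on the
		 * PCH interconnect behavior).
		 */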
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}

/**
 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 * @hw: pointer to the HW structure
 *
 * During Sx to S0 transitions on non-managed devices or managed devices
 * on which PHY resets are not blocked, if the PHY registers cannot be
 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
 * the PHY.
 * On i217, setup Intel Rapid Start Technology.
 **/
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return ret_val;
	}

	/* For i217 Intel Rapid Start Technology support, when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return ret_val;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
		return ret_val;
	}
	return E1000_SUCCESS;
}

/**
 * e1000_cleanup_led_ich8lan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
					     0);

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
}

/**
 * e1000_led_on_ich8lan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_on_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
	return E1000_SUCCESS;
}

/**
 * e1000_led_off_ich8lan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_off_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
	return E1000_SUCCESS;
}

/**
 * e1000_setup_led_pchlan - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_setup_led_pchlan");

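	/* On PCH parts the SW-controllable LED configuration is accessed
	 * through the PHY (HV_LED_CONFIG) rather than the MAC's LEDCTL
	 * register.
	 */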
	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_mode1);
}

/**
 * e1000_cleanup_led_pchlan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_default);
}

/**
 * e1000_led_on_pchlan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	DEBUGFUNC("e1000_led_on_pchlan");

	/* If no link, then turn LED on by setting the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode2.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
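		/* Each LED owns a 5-bit field in HV_LED_CONFIG: mode in the
		 * low four bits, invert (IVRT) in bit 4, hence the (i * 5)
		 * shifts below.
		 */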
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 * e1000_led_off_pchlan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	DEBUGFUNC("e1000_led_off_pchlan");

	/* If no link, then turn LED off by clearing the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode1.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 * @hw: pointer to the HW structure
 *
 * Read the appropriate register for the config done bit for completion
 * status and configure the PHY through s/w for EEPROM-less parts.
 *
 * NOTE: some silicon which is EEPROM-less will fail trying to read the
 * config done bit, so only an error is logged and execution continues.
 * If we were to return with an error, EEPROM-less silicon would not be
 * able to be reset or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When the auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no EEPROM and would prevent getting
			 * link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power-down to save power, or to turn off link during
 * a driver unload, or when Wake on LAN is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

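	/* These MAC statistics registers are clear-on-read; the values are
	 * discarded here, the reads alone reset the counters.
	 */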
	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}