/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000_api.h"

static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					   bool active);
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					   bool active);
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				  u16 words, u16 *data);
static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data);
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					   u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					  u16 *speed, u16 *duplex);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					 u32 offset, u8 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data);
static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					   u32 *data);
static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
					  u32 offset, u32 *data);
static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
					    u32 offset, u32 data);
static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
						 u32 offset, u32 dword);
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					 u32 offset, u16 *data);
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1;		/* bit 0 Flash Cycle Done */
		u16 flcerr:1;		/* bit 1 Flash Cycle Error */
		u16 dael:1;		/* bit 2 Direct Access error Log */
		u16 berasesz:2;		/* bit 4:3 Sector Erase Size */
		u16 flcinprog:1;	/* bit 5 flash cycle in Progress */
		u16 reserved1:2;	/* bits 7:6 Reserved */
		u16 reserved2:6;	/* bits 13:8 Reserved */
		u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
		u16 flockdn:1;		/* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;		/* 0 Flash Cycle Go */
		u16 flcycle:2;		/* 2:1 Flash Cycle */
		u16 reserved:5;		/* 7:3 Reserved */
		u16 fldbcount:2;	/* 9:8 Flash Data Byte Count */
		u16 flockdn:6;		/* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8;		/* 0:7 GbE region Read Access */
		u32 grwa:8;		/* 8:15 GbE region Write Access */
		u32 gmrag:8;		/* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8;		/* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u16 regval;
};
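
/* These unions overlay bit-field views on raw register values so the flash
 * helpers later in this file can read-modify-write individual fields; the
 * usual pattern (sketch) is:
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcinprog)
 *		...wait for the pending flash cycle to complete...
 *	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 */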

/**
 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 * @hw: pointer to the HW structure
 *
 * Test access to the PHY registers by reading the PHY ID registers. If
 * the PHY ID is already known (e.g. resume path) compare it with known ID,
 * otherwise assume the read PHY ID is correct if it is valid.
 *
 * Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

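	/* Reads of the PHY ID registers can fail or return 0xFFFF while the
	 * MAC-PHY interconnect is settling, so allow one retry.
	 */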
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return FALSE;
out:
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Only unforce SMBus if ME is not active */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		}
	}

	return TRUE;
}

/**
 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 * @hw: pointer to the HW structure
 *
 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 * used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		msec_delay(50);
	} else {
		u16 count = 20;

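		/* Poll for LANPHYPC completion (LPCD), up to 20 * 5 msec,
		 * before the final settle delay.
		 */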
		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}

/**
 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 * @hw: pointer to the HW structure
 *
 * Workarounds/flow necessary for PHY initialization during driver load
 * and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, TRUE);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
	case e1000_pch_mtp:
	case e1000_pch_lnp:
	case e1000_pch_rpl:
	case e1000_pch_arl:
	case e1000_pch_ptp:
	case e1000_pch_nvl:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY. Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it. Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers. The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function. If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
	}

	return ret_val;
}

/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
		case e1000_pch_tgp:
		case e1000_pch_adp:
		case e1000_pch_mtp:
		case e1000_pch_lnp:
		case e1000_pch_rpl:
		case e1000_pch_arl:
		case e1000_pch_ptp:
		case e1000_pch_nvl:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg = e1000_read_phy_reg_igp;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg = e1000_write_phy_reg_igp;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function
 * pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type >= e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		nvm_size = (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
			   * NVM_SIZE_MULTIPLIER;
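		/* e.g. assuming NVM_SIZE_MULTIPLIER is the usual 4 KB, a
		 * strap field of 0x1F gives (31 + 1) * 4 KB = 128 KB.
		 */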
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			DEBUGOUT("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
				       << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
	nvm->ops.release = e1000_release_nvm_ich8lan;
	if (hw->mac.type >= e1000_pch_spt) {
		nvm->ops.read = e1000_read_nvm_spt;
		nvm->ops.update = e1000_update_nvm_checksum_spt;
	} else {
		nvm->ops.read = e1000_read_nvm_ich8lan;
		nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	}
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write = e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}

/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific MAC parameters and function
 * pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_pch_tgp:
	case e1000_pch_adp:
	case e1000_pch_mtp:
	case e1000_pch_lnp:
	case e1000_pch_rpl:
	case e1000_pch_arl:
	case e1000_pch_ptp:
	case e1000_pch_nvl:
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
		/* fall-through */
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if (mac->type >= e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
			e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

	return E1000_SUCCESS;
}

/**
 * __e1000_access_emi_reg_locked - Read/write EMI register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: pointer to value to read/write from/to the EMI address
 * @read: boolean flag to indicate read or write
 *
 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_emi_reg_locked");

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
						      data);
	else
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       *data);

	return ret_val;
}

/**
 * e1000_read_emi_reg_locked - Read Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be read from the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	DEBUGFUNC("e1000_read_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
}

/**
 * e1000_write_emi_reg_locked - Write Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be written to the EMI address
 *
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	DEBUGFUNC("e1000_write_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
}

/**
 * e1000_set_eee_pchlan - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
 * the link and the EEE capabilities of the link partner. The LPI Control
 * register bits will remain set only if/when link is up.
 *
 * EEE LPI must not be asserted earlier than one second after link is up.
 * On 82579, EEE LPI should not be enabled until then; otherwise there
 * can be link issues with some switches. Other devices can have EEE LPI
 * enabled immediately upon link up since they have a timer in hardware which
 * prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	DEBUGFUNC("e1000_set_eee_pchlan");

	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return E1000_SUCCESS;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
					~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests. Work around the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 * speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		if ((hw->phy.revision > 5) || !link ||
		    ((status & E1000_STATUS_SPEED_100) &&
		     (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

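/**
 * e1000_ltr2ns - Convert an encoded LTR value to nanoseconds
 * @ltr: encoded Latency Tolerance Reporting value (10-bit value, 3-bit scale)
 *
 * Decode per the PCIe LTR encoding described in
 * e1000_platform_pm_pch_lpt() below: latency = value * 2^(scale * 5) ns,
 * e.g. a value of 3 with scale 1 decodes to 3 * 2^5 = 96 ns.
 **/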
static u64 e1000_ltr2ns(u16 ltr)
{
	u32 value, scale;

	/* Determine the latency in nsec based on the LTR value & scale */
	value = ltr & E1000_LTRV_VALUE_MASK;
	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

	return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
}

/**
 * e1000_platform_pm_pch_lpt - Set platform power management values
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 * when link is up (which must not exceed the maximum latency supported
 * by the platform), otherwise specify there is no LTR requirement.
 * Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
 * latencies in the LTR Extended Capability Structure in the PCIe Extended
 * Capability register set, on this device LTR is set by writing the
 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 * message to the PMC.
 *
 * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 * high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
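		/* Drain time in ns: buffer bits (bytes * 8) divided by the
		 * link speed in Mb/s gives microseconds; the extra * 1000
		 * converts that to nanoseconds.
		 */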
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
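		/* Round up in 2^5 steps until the value fits its 10 bits;
		 * e.g. ~66,000 ns encodes as value 65 with scale 2
		 * (65 * 2^10 ns), never understating the tolerance.
		 */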
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

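		/* Convert the clamped latency back into the KB of Rx buffer
		 * consumed at line rate; what is left of the allocation
		 * becomes the OBFF high-water mark.
		 */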
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode. This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}

/**
 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 * @hw: pointer to the HW structure
 * @itr: interrupt throttling rate
 *
 * Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
	u32 svcr;
	s32 timer;

	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;

	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;
	}

	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
}

/**
 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @to_sx: boolean indicating a system power state transition to Sx
 *
 * When link is down, configure ULP mode to significantly reduce the power
 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
 * ME firmware to start the ULP configuration. If not on an ME enabled
 * system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;
	u16 oem_reg = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			  (E1000_READ_REG(hw, E1000_FEXT) &
			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			  i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
	 * LPLU and disable Gig speed when entering ULP
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
						       &oem_reg);
		if (ret_val)
			goto release;

		phy_reg = oem_reg;
		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							phy_reg);

		if (ret_val)
			goto release;
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
		else
			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							oem_reg);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @force: boolean indicating whether or not to force disabling ULP
 *
 * Un-configure ULP mode when link is up, the system is transitioned from
 * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
 * system, poll for an indication from ME that ULP has been un-configured.
 * If not on an ME enabled system, un-configure the ULP mode by software.
 *
 * During nominal operation, this function is called when link is acquired
 * to disable ULP mode (force=FALSE); otherwise, for example when unloading
 * the driver or during Sx->S0 transitions, this is called with force=TRUE
 * to forcibly disable ULP.
 */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 30) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware. Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}

/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed. If a
1565 * change in link status has been detected, then we read the PHY registers
1566 * to get the current speed/duplex if link exists.
1567 **/
e1000_check_for_copper_link_ich8lan(struct e1000_hw * hw)1568 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1569 {
1570 struct e1000_mac_info *mac = &hw->mac;
1571 s32 ret_val, tipg_reg = 0;
1572 u16 emi_addr, emi_val = 0;
1573 bool link;
1574 u16 phy_reg;
1575
1576 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1577
1578 /* We only want to go out to the PHY registers to see if Auto-Neg
1579 * has completed and/or if our link status has changed. The
1580 * get_link_status flag is set upon receiving a Link Status
1581 * Change or Rx Sequence Error interrupt.
1582 */
1583 if (!mac->get_link_status)
1584 return E1000_SUCCESS;
1585
1586 /* First we want to see if the MII Status Register reports
1587 * link. If so, then we want to get the current speed/duplex
1588 * of the PHY.
1589 */
1590 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1591 if (ret_val)
1592 return ret_val;
1593
1594 if (hw->mac.type == e1000_pchlan) {
1595 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1596 if (ret_val)
1597 return ret_val;
1598 }
1599
1600 /* When connected at 10Mbps half-duplex, some parts are excessively
1601 * aggressive resulting in many collisions. To avoid this, increase
1602 * the IPG and reduce Rx latency in the PHY.
1603 */
1604 if ((hw->mac.type >= e1000_pch2lan) && link) {
1605 u16 speed, duplex;
1606
1607 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1608 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1609 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1610
1611 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1612 tipg_reg |= 0xFF;
1613 /* Reduce Rx latency in analog PHY */
1614 emi_val = 0;
1615 } else if (hw->mac.type >= e1000_pch_spt &&
1616 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1617 tipg_reg |= 0xC;
1618 emi_val = 1;
1619 } else {
1620 /* Roll back the default values */
1621 tipg_reg |= 0x08;
1622 emi_val = 1;
1623 }
1624
1625 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1626
1627 ret_val = hw->phy.ops.acquire(hw);
1628 if (ret_val)
1629 return ret_val;
1630
1631 if (hw->mac.type == e1000_pch2lan)
1632 emi_addr = I82579_RX_CONFIG;
1633 else
1634 emi_addr = I217_RX_CONFIG;
1635 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1636
1637 if (hw->mac.type >= e1000_pch_lpt) {
1638 u16 phy_reg;
1639
1640 hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1641 &phy_reg);
1642 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1643 if (speed == SPEED_100 || speed == SPEED_10)
1644 phy_reg |= 0x3E8;
1645 else
1646 phy_reg |= 0xFA;
1647 hw->phy.ops.write_reg_locked(hw,
1648 I217_PLL_CLOCK_GATE_REG,
1649 phy_reg);
1650
1651 if (speed == SPEED_1000) {
1652 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1653 &phy_reg);
1654
1655 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1656
1657 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1658 phy_reg);
1659 }
1660 }
1661 hw->phy.ops.release(hw);
1662
1663 if (ret_val)
1664 return ret_val;
1665
1666 if (hw->mac.type >= e1000_pch_spt) {
1667 u16 data;
1668 u16 ptr_gap;
1669
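/* Bits 11:2 of PHY_REG(776, 20) hold the Tx pointer gap (the same
 * field the jumbo-frame workaround below programs via
 * E1000_TX_PTR_GAP): at 1 Gbps enforce a floor of 0x18, otherwise
 * write the whole register as 0xC023 (pointer gap 0x8, the default
 * restored by the jumbo-frame disable path).
 */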
1670 if (speed == SPEED_1000) {
1671 ret_val = hw->phy.ops.acquire(hw);
1672 if (ret_val)
1673 return ret_val;
1674
1675 ret_val = hw->phy.ops.read_reg_locked(hw,
1676 PHY_REG(776, 20),
1677 &data);
1678 if (ret_val) {
1679 hw->phy.ops.release(hw);
1680 return ret_val;
1681 }
1682
1683 ptr_gap = (data & (0x3FF << 2)) >> 2;
1684 if (ptr_gap < 0x18) {
1685 data &= ~(0x3FF << 2);
1686 data |= (0x18 << 2);
1687 ret_val =
1688 hw->phy.ops.write_reg_locked(hw,
1689 PHY_REG(776, 20), data);
1690 }
1691 hw->phy.ops.release(hw);
1692 if (ret_val)
1693 return ret_val;
1694 } else {
1695 ret_val = hw->phy.ops.acquire(hw);
1696 if (ret_val)
1697 return ret_val;
1698
1699 ret_val = hw->phy.ops.write_reg_locked(hw,
1700 PHY_REG(776, 20),
1701 0xC023);
1702 hw->phy.ops.release(hw);
1703 if (ret_val)
1704 return ret_val;
1705
1706 }
1707 }
1708 }
1709
1710 /* I217 Packet Loss issue:
1711 * ensure that FEXTNVM4 Beacon Duration is set correctly
1712 * on power up.
1713 * Set the Beacon Duration for I217 to 8 usec
1714 */
1715 if (hw->mac.type >= e1000_pch_lpt) {
1716 u32 mac_reg;
1717
1718 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1719 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1720 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1721 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1722 }
1723
1724 /* Work-around I218 hang issue */
1725 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1726 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1727 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1728 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1729 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1730 if (ret_val)
1731 return ret_val;
1732 }
1733 if (hw->mac.type >= e1000_pch_lpt) {
1734 /* Set platform power management values for
1735 * Latency Tolerance Reporting (LTR)
1736 * Optimized Buffer Flush/Fill (OBFF)
1737 */
1738 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1739 if (ret_val)
1740 return ret_val;
1741 }
1742
1743 /* Clear link partner's EEE ability */
1744 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1745
1746 /* FEXTNVM6 K1-off workaround - for SPT only */
1747 if (hw->mac.type == e1000_pch_spt) {
1748 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1749 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1750
1751 if ((pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) &&
1752 (hw->dev_spec.ich8lan.disable_k1_off == FALSE))
1753 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1754 else
1755 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1756
1757 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1758 }
1759
1760 if (!link)
1761 return E1000_SUCCESS; /* No link detected */
1762
1763 mac->get_link_status = FALSE;
1764
1765 switch (hw->mac.type) {
1766 case e1000_pch2lan:
1767 ret_val = e1000_k1_workaround_lv(hw);
1768 if (ret_val)
1769 return ret_val;
1770 /* fall-thru */
1771 case e1000_pchlan:
1772 if (hw->phy.type == e1000_phy_82578) {
1773 ret_val = e1000_link_stall_workaround_hv(hw);
1774 if (ret_val)
1775 return ret_val;
1776 }
1777
1778 /* Workaround for PCHx parts in half-duplex:
1779 * Set the number of preambles removed from the packet
1780 * when it is passed from the PHY to the MAC to prevent
1781 * the MAC from misinterpreting the packet type.
1782 */
1783 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1784 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1785
1786 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1787 E1000_STATUS_FD)
1788 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1789
1790 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1791 break;
1792 default:
1793 break;
1794 }
1795
1796 /* Check if there was DownShift; this must be checked
1797 * immediately after link-up
1798 */
1799 e1000_check_downshift_generic(hw);
1800
1801 /* Enable/Disable EEE after link up */
1802 if (hw->phy.type > e1000_phy_82579) {
1803 ret_val = e1000_set_eee_pchlan(hw);
1804 if (ret_val)
1805 return ret_val;
1806 }
1807
1808 /* If we are forcing speed/duplex, then we simply return since
1809 * we have already determined whether we have link or not.
1810 */
1811 if (!mac->autoneg)
1812 return -E1000_ERR_CONFIG;
1813
1814 /* Auto-Neg is enabled. Auto Speed Detection takes care
1815 * of MAC speed/duplex configuration. So we only need to
1816 * configure Collision Distance in the MAC.
1817 */
1818 mac->ops.config_collision_dist(hw);
1819
1820 /* Configure Flow Control now that Auto-Neg has completed.
1821 * First, we need to restore the desired flow control
1822 * settings because we may have had to re-autoneg with a
1823 * different link partner.
1824 */
1825 ret_val = e1000_config_fc_after_link_up_generic(hw);
1826 if (ret_val)
1827 DEBUGOUT("Error configuring flow control\n");
1828
1829 return ret_val;
1830 }
1831
1832 /**
1833 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1834 * @hw: pointer to the HW structure
1835 *
1836 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1837 **/
1838 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1839 {
1840 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1841
1842 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1843 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1844 switch (hw->mac.type) {
1845 case e1000_ich8lan:
1846 case e1000_ich9lan:
1847 case e1000_ich10lan:
1848 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1849 break;
1850 case e1000_pchlan:
1851 case e1000_pch2lan:
1852 case e1000_pch_lpt:
1853 case e1000_pch_spt:
1854 case e1000_pch_cnp:
1855 case e1000_pch_tgp:
1856 case e1000_pch_adp:
1857 case e1000_pch_mtp:
1858 case e1000_pch_lnp:
1859 case e1000_pch_rpl:
1860 case e1000_pch_arl:
1861 case e1000_pch_ptp:
1862 case e1000_pch_nvl:
1863 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1864 break;
1865 default:
1866 break;
1867 }
1868 }
1869
1870 /**
1871 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1872 * @hw: pointer to the HW structure
1873 *
1874 * Acquires the mutex for performing NVM operations.
1875 **/
1876 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1877 {
1878 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1879
1880 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1881
1882 return E1000_SUCCESS;
1883 }
1884
1885 /**
1886 * e1000_release_nvm_ich8lan - Release NVM mutex
1887 * @hw: pointer to the HW structure
1888 *
1889 * Releases the mutex used while performing NVM operations.
1890 **/
1891 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1892 {
1893 DEBUGFUNC("e1000_release_nvm_ich8lan");
1894
1895 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1896
1897 return;
1898 }
1899
1900 /**
1901 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1902 * @hw: pointer to the HW structure
1903 *
1904 * Acquires the software control flag for performing PHY and select
1905 * MAC CSR accesses.
1906 **/
1907 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1908 {
1909 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1910 s32 ret_val = E1000_SUCCESS;
1911
1912 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1913
1914 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1915
1916 while (timeout) {
1917 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1918 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1919 break;
1920
1921 msec_delay_irq(1);
1922 timeout--;
1923 }
1924
1925 if (!timeout) {
1926 DEBUGOUT("SW has already locked the resource.\n");
1927 ret_val = -E1000_ERR_CONFIG;
1928 goto out;
1929 }
1930
1931 timeout = SW_FLAG_TIMEOUT;
1932
1933 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1934 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1935
1936 while (timeout) {
1937 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1938 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1939 break;
1940
1941 msec_delay_irq(1);
1942 timeout--;
1943 }
1944
1945 if (!timeout) {
1946 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1947 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1948 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1949 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1950 ret_val = -E1000_ERR_CONFIG;
1951 goto out;
1952 }
1953
1954 out:
1955 if (ret_val)
1956 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1957
1958 return ret_val;
1959 }
1960
1961 /**
1962 * e1000_release_swflag_ich8lan - Release software control flag
1963 * @hw: pointer to the HW structure
1964 *
1965 * Releases the software control flag for performing PHY and select
1966 * MAC CSR accesses.
1967 **/
1968 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1969 {
1970 u32 extcnf_ctrl;
1971
1972 DEBUGFUNC("e1000_release_swflag_ich8lan");
1973
1974 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1975
1976 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1977 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1978 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1979 } else {
1980 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1981 }
1982
1983 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1984
1985 return;
1986 }
1987
1988 /**
1989 * e1000_check_mng_mode_ich8lan - Checks management mode
1990 * @hw: pointer to the HW structure
1991 *
1992 * This checks if the adapter has any manageability enabled.
1993 * This is a function pointer entry point only called by read/write
1994 * routines for the PHY and NVM parts.
1995 **/
1996 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1997 {
1998 u32 fwsm;
1999
2000 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
2001
2002 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2003
2004 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
2005 ((fwsm & E1000_FWSM_MODE_MASK) ==
2006 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
2007 }
2008
2009 /**
2010 * e1000_check_mng_mode_pchlan - Checks management mode
2011 * @hw: pointer to the HW structure
2012 *
2013 * This checks if the adapter has iAMT enabled.
2014 * This is a function pointer entry point only called by read/write
2015 * routines for the PHY and NVM parts.
2016 **/
2017 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
2018 {
2019 u32 fwsm;
2020
2021 DEBUGFUNC("e1000_check_mng_mode_pchlan");
2022
2023 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2024
2025 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
2026 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
2027 }
2028
2029 /**
2030 * e1000_rar_set_pch2lan - Set receive address register
2031 * @hw: pointer to the HW structure
2032 * @addr: pointer to the receive address
2033 * @index: receive address array register
2034 *
2035 * Sets the receive address array register at index to the address passed
2036 * in by addr. For 82579, RAR[0] is the base address register that is to
2037 * contain the MAC address, but RAR[1-6] are reserved for manageability (ME).
2038 * Use SHRA[0-3] in place of those reserved for ME.
2039 **/
2040 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2041 {
2042 u32 rar_low, rar_high;
2043
2044 DEBUGFUNC("e1000_rar_set_pch2lan");
2045
2046 /* HW expects these in little endian so we reverse the byte order
2047 * from network order (big endian) to little endian
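* e.g. 00:1B:21:AB:CD:EF -> RAL = 0xAB211B00, RAH[15:0] = 0xEFCD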
2048 */
2049 rar_low = ((u32) addr[0] |
2050 ((u32) addr[1] << 8) |
2051 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2052
2053 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2054
2055 /* If MAC address zero, no need to set the AV bit */
2056 if (rar_low || rar_high)
2057 rar_high |= E1000_RAH_AV;
2058
2059 if (index == 0) {
2060 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2061 E1000_WRITE_FLUSH(hw);
2062 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2063 E1000_WRITE_FLUSH(hw);
2064 return E1000_SUCCESS;
2065 }
2066
2067 /* RAR[1-6] are owned by manageability. Skip those and program the
2068 * next address into the SHRA register array.
2069 */
2070 if (index < (u32) (hw->mac.rar_entry_count)) {
2071 s32 ret_val;
2072
2073 ret_val = e1000_acquire_swflag_ich8lan(hw);
2074 if (ret_val)
2075 goto out;
2076
2077 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2078 E1000_WRITE_FLUSH(hw);
2079 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2080 E1000_WRITE_FLUSH(hw);
2081
2082 e1000_release_swflag_ich8lan(hw);
2083
2084 /* verify the register updates */
2085 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2086 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2087 return E1000_SUCCESS;
2088
2089 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2090 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2091 }
2092
2093 out:
2094 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2095 return -E1000_ERR_CONFIG;
2096 }
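
/* Usage sketch (hypothetical address, for illustration only): program a
 * second unicast filter, which lands in SHRA[0] on 82579 since RAR[1-6]
 * belong to ME:
 *
 *	u8 addr[ETH_ADDR_LEN] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };
 *	if (e1000_rar_set_pch2lan(hw, addr, 1) != E1000_SUCCESS)
 *		DEBUGOUT("SHRA[0] unavailable\n");
 */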
2097
2098 /**
2099 * e1000_rar_set_pch_lpt - Set receive address registers
2100 * @hw: pointer to the HW structure
2101 * @addr: pointer to the receive address
2102 * @index: receive address array register
2103 *
2104 * Sets the receive address register array at index to the address passed
2105 * in by addr. For LPT, RAR[0] is the base address register that is to
2106 * contain the MAC address. SHRA[0-10] are the shared receive address
2107 * registers that are shared between the Host and manageability engine (ME).
2108 **/
2109 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2110 {
2111 u32 rar_low, rar_high;
2112 u32 wlock_mac;
2113
2114 DEBUGFUNC("e1000_rar_set_pch_lpt");
2115
2116 /* HW expects these in little endian so we reverse the byte order
2117 * from network order (big endian) to little endian
2118 */
2119 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2120 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2121
2122 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2123
2124 /* If MAC address zero, no need to set the AV bit */
2125 if (rar_low || rar_high)
2126 rar_high |= E1000_RAH_AV;
2127
2128 if (index == 0) {
2129 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2130 E1000_WRITE_FLUSH(hw);
2131 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2132 E1000_WRITE_FLUSH(hw);
2133 return E1000_SUCCESS;
2134 }
2135
2136 /* The manageability engine (ME) can lock certain SHRAR registers that
2137 * it is using - those registers are unavailable for use.
2138 */
2139 if (index < hw->mac.rar_entry_count) {
2140 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2141 E1000_FWSM_WLOCK_MAC_MASK;
2142 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2143
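/* WLOCK_MAC encoding, as used by the checks below: 0 leaves all
 * SHRAR registers writable by SW, 1 locks them all, and any other
 * value n keeps SHRA[0..n-1] available to SW.
 */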
2144 /* Check if all SHRAR registers are locked */
2145 if (wlock_mac == 1)
2146 goto out;
2147
2148 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2149 s32 ret_val;
2150
2151 ret_val = e1000_acquire_swflag_ich8lan(hw);
2152
2153 if (ret_val)
2154 goto out;
2155
2156 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2157 rar_low);
2158 E1000_WRITE_FLUSH(hw);
2159 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2160 rar_high);
2161 E1000_WRITE_FLUSH(hw);
2162
2163 e1000_release_swflag_ich8lan(hw);
2164
2165 /* verify the register updates */
2166 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2167 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2168 return E1000_SUCCESS;
2169 }
2170 }
2171
2172 out:
2173 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2174 return -E1000_ERR_CONFIG;
2175 }
2176
2177 /**
2178 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2179 * @hw: pointer to the HW structure
2180 * @mc_addr_list: array of multicast addresses to program
2181 * @mc_addr_count: number of multicast addresses to program
2182 *
2183 * Updates the entire Multicast Table Array of the PCH2 MAC and PHY.
2184 * The caller must have a packed mc_addr_list of multicast addresses.
2185 **/
2186 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2187 u8 *mc_addr_list,
2188 u32 mc_addr_count)
2189 {
2190 u16 phy_reg = 0;
2191 int i;
2192 s32 ret_val;
2193
2194 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2195
2196 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2197
2198 ret_val = hw->phy.ops.acquire(hw);
2199 if (ret_val)
2200 return;
2201
2202 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2203 if (ret_val)
2204 goto release;
2205
2206 for (i = 0; i < hw->mac.mta_reg_count; i++) {
2207 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2208 (u16)(hw->mac.mta_shadow[i] &
2209 0xFFFF));
2210 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2211 (u16)((hw->mac.mta_shadow[i] >> 16) &
2212 0xFFFF));
2213 }
2214
2215 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2216
2217 release:
2218 hw->phy.ops.release(hw);
2219 }
2220
2221 /**
2222 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2223 * @hw: pointer to the HW structure
2224 *
2225 * Checks if firmware is blocking the reset of the PHY.
2226 * This is a function pointer entry point only called by
2227 * reset routines.
2228 **/
2229 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2230 {
2231 u32 fwsm;
2232 bool blocked = FALSE;
2233 int i = 0;
2234
2235 DEBUGFUNC("e1000_check_reset_block_ich8lan");
2236
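/* Poll FWSM for the RSPCIPHY bit with a 10 msec delay between
 * reads, giving firmware up to ~300 msec (30 iterations) to stop
 * blocking the PHY reset.
 */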
2237 do {
2238 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2239 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2240 blocked = TRUE;
2241 msec_delay(10);
2242 continue;
2243 }
2244 blocked = FALSE;
2245 } while (blocked && (i++ < 30));
2246 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2247 }
2248
2249 /**
2250 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2251 * @hw: pointer to the HW structure
2252 *
2253 * Assumes semaphore already acquired.
2254 *
2255 **/
2256 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2257 {
2258 u16 phy_data;
2259 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2260 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2261 E1000_STRAP_SMT_FREQ_SHIFT;
2262 s32 ret_val;
2263
2264 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2265
2266 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2267 if (ret_val)
2268 return ret_val;
2269
2270 phy_data &= ~HV_SMB_ADDR_MASK;
2271 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2272 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2273
2274 if (hw->phy.type == e1000_phy_i217) {
2275 /* Restore SMBus frequency */
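/* The strap field decodes (per the check below) as 0 = no valid
 * frequency, 1..3 = a supported setting; after the post-decrement,
 * bit 0 and bit 1 of the remaining value are shifted into the
 * PHY's low and high frequency-select bits respectively.
 */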
2276 if (freq--) {
2277 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2278 phy_data |= (freq & (1 << 0)) <<
2279 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2280 phy_data |= (freq & (1 << 1)) <<
2281 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2282 } else {
2283 DEBUGOUT("Unsupported SMB frequency in PHY\n");
2284 }
2285 }
2286
2287 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2288 }
2289
2290 /**
2291 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2292 * @hw: pointer to the HW structure
2293 *
2294 * SW should configure the LCD from the NVM extended configuration region
2295 * as a workaround for certain parts.
2296 **/
2297 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2298 {
2299 struct e1000_phy_info *phy = &hw->phy;
2300 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2301 s32 ret_val = E1000_SUCCESS;
2302 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2303
2304 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2305
2306 /* Initialize the PHY from the NVM on ICH platforms. This
2307 * is needed due to an issue where the NVM configuration is
2308 * not properly autoloaded after power transitions.
2309 * Therefore, after each PHY reset, we will load the
2310 * configuration data out of the NVM manually.
2311 */
2312 switch (hw->mac.type) {
2313 case e1000_ich8lan:
2314 if (phy->type != e1000_phy_igp_3)
2315 return ret_val;
2316
2317 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2318 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2319 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2320 break;
2321 }
2322 /* Fall-thru */
2323 case e1000_pchlan:
2324 case e1000_pch2lan:
2325 case e1000_pch_lpt:
2326 case e1000_pch_spt:
2327 case e1000_pch_cnp:
2328 case e1000_pch_tgp:
2329 case e1000_pch_adp:
2330 case e1000_pch_mtp:
2331 case e1000_pch_lnp:
2332 case e1000_pch_rpl:
2333 case e1000_pch_arl:
2334 case e1000_pch_ptp:
2335 case e1000_pch_nvl:
2336 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2337 break;
2338 default:
2339 return ret_val;
2340 }
2341
2342 ret_val = hw->phy.ops.acquire(hw);
2343 if (ret_val)
2344 return ret_val;
2345
2346 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2347 if (!(data & sw_cfg_mask))
2348 goto release;
2349
2350 /* Make sure HW does not configure LCD from PHY
2351 * extended configuration before SW configuration
2352 */
2353 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2354 if ((hw->mac.type < e1000_pch2lan) &&
2355 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2356 goto release;
2357
2358 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2359 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2360 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2361 if (!cnf_size)
2362 goto release;
2363
2364 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2365 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2366
2367 if (((hw->mac.type == e1000_pchlan) &&
2368 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2369 (hw->mac.type > e1000_pchlan)) {
2370 /* HW configures the SMBus address and LEDs when the
2371 * OEM and LCD Write Enable bits are set in the NVM.
2372 * When both NVM bits are cleared, SW will configure
2373 * them instead.
2374 */
2375 ret_val = e1000_write_smbus_addr(hw);
2376 if (ret_val)
2377 goto release;
2378
2379 data = E1000_READ_REG(hw, E1000_LEDCTL);
2380 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2381 (u16)data);
2382 if (ret_val)
2383 goto release;
2384 }
2385
2386 /* Configure LCD from extended configuration region. */
2387
2388 /* cnf_base_addr is in DWORD */
2389 word_addr = (u16)(cnf_base_addr << 1);
2390
2391 for (i = 0; i < cnf_size; i++) {
2392 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2393 &reg_data);
2394 if (ret_val)
2395 goto release;
2396
2397 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2398 1, &reg_addr);
2399 if (ret_val)
2400 goto release;
2401
2402 /* Save off the PHY page for future writes. */
2403 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2404 phy_page = reg_data;
2405 continue;
2406 }
2407
2408 reg_addr &= PHY_REG_MASK;
2409 reg_addr |= phy_page;
2410
2411 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2412 reg_data);
2413 if (ret_val)
2414 goto release;
2415 }
2416
2417 release:
2418 hw->phy.ops.release(hw);
2419 return ret_val;
2420 }
2421
2422 /**
2423 * e1000_k1_gig_workaround_hv - K1 Si workaround
2424 * @hw: pointer to the HW structure
2425 * @link: link up bool flag
2426 *
2427 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2428 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
2429 * If link is down, the function will restore the default K1 setting located
2430 * in the NVM.
2431 **/
2432 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2433 {
2434 s32 ret_val = E1000_SUCCESS;
2435 u16 status_reg = 0;
2436 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2437
2438 DEBUGFUNC("e1000_k1_gig_workaround_hv");
2439
2440 if (hw->mac.type != e1000_pchlan)
2441 return E1000_SUCCESS;
2442
2443 /* Wrap the whole flow with the sw flag */
2444 ret_val = hw->phy.ops.acquire(hw);
2445 if (ret_val)
2446 return ret_val;
2447
2448 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2449 if (link) {
2450 if (hw->phy.type == e1000_phy_82578) {
2451 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2452 &status_reg);
2453 if (ret_val)
2454 goto release;
2455
2456 status_reg &= (BM_CS_STATUS_LINK_UP |
2457 BM_CS_STATUS_RESOLVED |
2458 BM_CS_STATUS_SPEED_MASK);
2459
2460 if (status_reg == (BM_CS_STATUS_LINK_UP |
2461 BM_CS_STATUS_RESOLVED |
2462 BM_CS_STATUS_SPEED_1000))
2463 k1_enable = FALSE;
2464 }
2465
2466 if (hw->phy.type == e1000_phy_82577) {
2467 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2468 &status_reg);
2469 if (ret_val)
2470 goto release;
2471
2472 status_reg &= (HV_M_STATUS_LINK_UP |
2473 HV_M_STATUS_AUTONEG_COMPLETE |
2474 HV_M_STATUS_SPEED_MASK);
2475
2476 if (status_reg == (HV_M_STATUS_LINK_UP |
2477 HV_M_STATUS_AUTONEG_COMPLETE |
2478 HV_M_STATUS_SPEED_1000))
2479 k1_enable = FALSE;
2480 }
2481
2482 /* Link stall fix for link up */
2483 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2484 0x0100);
2485 if (ret_val)
2486 goto release;
2487
2488 } else {
2489 /* Link stall fix for link down */
2490 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2491 0x4100);
2492 if (ret_val)
2493 goto release;
2494 }
2495
2496 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2497
2498 release:
2499 hw->phy.ops.release(hw);
2500
2501 return ret_val;
2502 }
2503
2504 /**
2505 * e1000_configure_k1_ich8lan - Configure K1 power state
2506 * @hw: pointer to the HW structure
2507 * @enable: K1 state to configure
2508 *
2509 * Configure the K1 power state based on the provided parameter.
2510 * Assumes semaphore already acquired.
2511 *
2512 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2513 **/
2514 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2515 {
2516 s32 ret_val;
2517 u32 ctrl_reg = 0;
2518 u32 ctrl_ext = 0;
2519 u32 reg = 0;
2520 u16 kmrn_reg = 0;
2521
2522 DEBUGFUNC("e1000_configure_k1_ich8lan");
2523
2524 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2525 &kmrn_reg);
2526 if (ret_val)
2527 return ret_val;
2528
2529 if (k1_enable)
2530 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2531 else
2532 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2533
2534 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2535 kmrn_reg);
2536 if (ret_val)
2537 return ret_val;
2538
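/* Briefly force the MAC speed with the speed-select bypass set so
 * the new K1 configuration takes effect, then restore the original
 * CTRL and CTRL_EXT values.
 */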
2539 usec_delay(20);
2540 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2541 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2542
2543 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2544 reg |= E1000_CTRL_FRCSPD;
2545 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2546
2547 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2548 E1000_WRITE_FLUSH(hw);
2549 usec_delay(20);
2550 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2551 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2552 E1000_WRITE_FLUSH(hw);
2553 usec_delay(20);
2554
2555 return E1000_SUCCESS;
2556 }
2557
2558 /**
2559 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2560 * @hw: pointer to the HW structure
2561 * @d0_state: boolean if entering d0 or d3 device state
2562 *
2563 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2564 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2565 * in NVM determine whether HW should configure LPLU and Gbe Disable.
2566 **/
2567 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2568 {
2569 s32 ret_val = 0;
2570 u32 mac_reg;
2571 u16 oem_reg;
2572
2573 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2574
2575 if (hw->mac.type < e1000_pchlan)
2576 return ret_val;
2577
2578 ret_val = hw->phy.ops.acquire(hw);
2579 if (ret_val)
2580 return ret_val;
2581
2582 if (hw->mac.type == e1000_pchlan) {
2583 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2584 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2585 goto release;
2586 }
2587
2588 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2589 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2590 goto release;
2591
2592 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2593
2594 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2595 if (ret_val)
2596 goto release;
2597
2598 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2599
2600 if (d0_state) {
2601 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2602 oem_reg |= HV_OEM_BITS_GBE_DIS;
2603
2604 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2605 oem_reg |= HV_OEM_BITS_LPLU;
2606 } else {
2607 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2608 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2609 oem_reg |= HV_OEM_BITS_GBE_DIS;
2610
2611 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2612 E1000_PHY_CTRL_NOND0A_LPLU))
2613 oem_reg |= HV_OEM_BITS_LPLU;
2614 }
2615
2616 /* Set Restart auto-neg to activate the bits */
2617 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2618 !hw->phy.ops.check_reset_block(hw))
2619 oem_reg |= HV_OEM_BITS_RESTART_AN;
2620
2621 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2622
2623 release:
2624 hw->phy.ops.release(hw);
2625
2626 return ret_val;
2627 }
2628
2629
2630 /**
2631 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2632 * @hw: pointer to the HW structure
2633 **/
2634 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2635 {
2636 s32 ret_val;
2637 u16 data;
2638
2639 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2640
2641 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2642 if (ret_val)
2643 return ret_val;
2644
2645 data |= HV_KMRN_MDIO_SLOW;
2646
2647 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2648
2649 return ret_val;
2650 }
2651
2652 /**
2653 * e1000_hv_phy_workarounds_ich8lan - PHY workarounds done after every reset
2654 * @hw: pointer to the HW structure
2655 **/
2656 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2657 {
2658 s32 ret_val = E1000_SUCCESS;
2659 u16 phy_data;
2660
2661 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2662
2663 if (hw->mac.type != e1000_pchlan)
2664 return E1000_SUCCESS;
2665
2666 /* Set MDIO slow mode before any other MDIO access */
2667 if (hw->phy.type == e1000_phy_82577) {
2668 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2669 if (ret_val)
2670 return ret_val;
2671 }
2672
2673 if (((hw->phy.type == e1000_phy_82577) &&
2674 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2675 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2676 /* Disable generation of early preamble */
2677 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2678 if (ret_val)
2679 return ret_val;
2680
2681 /* Preamble tuning for SSC */
2682 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2683 0xA204);
2684 if (ret_val)
2685 return ret_val;
2686 }
2687
2688 if (hw->phy.type == e1000_phy_82578) {
2689 /* Return registers to default by doing a soft reset then
2690 * writing 0x3140 to the control register.
2691 */
2692 if (hw->phy.revision < 2) {
2693 e1000_phy_sw_reset_generic(hw);
2694 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2695 0x3140);
2696 }
2697 }
2698
2699 /* Select page 0 */
2700 ret_val = hw->phy.ops.acquire(hw);
2701 if (ret_val)
2702 return ret_val;
2703
2704 hw->phy.addr = 1;
2705 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2706 hw->phy.ops.release(hw);
2707 if (ret_val)
2708 return ret_val;
2709
2710 /* Configure the K1 Si workaround during phy reset assuming there is
2711 * link, so that it disables K1 if link is at 1Gbps.
2712 */
2713 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2714 if (ret_val)
2715 return ret_val;
2716
2717 /* Workaround for link disconnects on a busy hub in half duplex */
2718 ret_val = hw->phy.ops.acquire(hw);
2719 if (ret_val)
2720 return ret_val;
2721 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2722 if (ret_val)
2723 goto release;
2724 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2725 phy_data & 0x00FF);
2726 if (ret_val)
2727 goto release;
2728
2729 /* set MSE higher to enable link to stay up when noise is high */
2730 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2731 release:
2732 hw->phy.ops.release(hw);
2733
2734 return ret_val;
2735 }
2736
2737 /**
2738 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2739 * @hw: pointer to the HW structure
2740 **/
2741 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2742 {
2743 u32 mac_reg;
2744 u16 i, phy_reg = 0;
2745 s32 ret_val;
2746
2747 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2748
2749 ret_val = hw->phy.ops.acquire(hw);
2750 if (ret_val)
2751 return;
2752 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2753 if (ret_val)
2754 goto release;
2755
2756 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2757 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2758 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2759 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2760 (u16)(mac_reg & 0xFFFF));
2761 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2762 (u16)((mac_reg >> 16) & 0xFFFF));
2763
2764 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2765 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2766 (u16)(mac_reg & 0xFFFF));
2767 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2768 (u16)((mac_reg & E1000_RAH_AV)
2769 >> 16));
2770 }
2771
2772 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2773
2774 release:
2775 hw->phy.ops.release(hw);
2776 }
2777
2778 static u32 e1000_calc_rx_da_crc(u8 mac[])
2779 {
2780 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2781 u32 i, j, mask, crc;
2782
2783 DEBUGFUNC("e1000_calc_rx_da_crc");
2784
2785 crc = 0xffffffff;
2786 for (i = 0; i < 6; i++) {
2787 crc = crc ^ mac[i];
2788 for (j = 8; j > 0; j--) {
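/* (crc & 1) * (-1) expands to all-ones when the LSB is set and
 * to 0 otherwise, so the polynomial is XORed in branch-free.
 */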
2789 mask = (crc & 1) * (-1);
2790 crc = (crc >> 1) ^ (poly & mask);
2791 }
2792 }
2793 return ~crc;
2794 }
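
/* Usage sketch (hypothetical values, mirroring the caller below): seed
 * the MAC's per-RAR initial CRC register from a valid receive address.
 *
 *	u8 mac_addr[ETH_ADDR_LEN] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };
 *	E1000_WRITE_REG(hw, E1000_PCH_RAICC(0),
 *			e1000_calc_rx_da_crc(mac_addr));
 */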
2795
2796 /**
2797 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2798 * with 82579 PHY
2799 * @hw: pointer to the HW structure
2800 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2801 **/
2802 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2803 {
2804 s32 ret_val = E1000_SUCCESS;
2805 u16 phy_reg, data;
2806 u32 mac_reg;
2807 u16 i;
2808
2809 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2810
2811 if (hw->mac.type < e1000_pch2lan)
2812 return E1000_SUCCESS;
2813
2814 /* disable Rx path while enabling/disabling workaround */
2815 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2816 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2817 phy_reg | (1 << 14));
2818 if (ret_val)
2819 return ret_val;
2820
2821 if (enable) {
2822 /* Write Rx addresses (rar_entry_count for RAL/H, and
2823 * SHRAL/H) and initial CRC values to the MAC
2824 */
2825 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2826 u8 mac_addr[ETH_ADDR_LEN] = {0};
2827 u32 addr_high, addr_low;
2828
2829 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2830 if (!(addr_high & E1000_RAH_AV))
2831 continue;
2832 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2833 mac_addr[0] = (addr_low & 0xFF);
2834 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2835 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2836 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2837 mac_addr[4] = (addr_high & 0xFF);
2838 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2839
2840 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2841 e1000_calc_rx_da_crc(mac_addr));
2842 }
2843
2844 /* Write Rx addresses to the PHY */
2845 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2846
2847 /* Enable jumbo frame workaround in the MAC */
2848 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2849 mac_reg &= ~(1 << 14);
2850 mac_reg |= (7 << 15);
2851 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2852
2853 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2854 mac_reg |= E1000_RCTL_SECRC;
2855 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2856
2857 ret_val = e1000_read_kmrn_reg_generic(hw,
2858 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2859 &data);
2860 if (ret_val)
2861 return ret_val;
2862 ret_val = e1000_write_kmrn_reg_generic(hw,
2863 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2864 data | (1 << 0));
2865 if (ret_val)
2866 return ret_val;
2867 ret_val = e1000_read_kmrn_reg_generic(hw,
2868 E1000_KMRNCTRLSTA_HD_CTRL,
2869 &data);
2870 if (ret_val)
2871 return ret_val;
2872 data &= ~(0xF << 8);
2873 data |= (0xB << 8);
2874 ret_val = e1000_write_kmrn_reg_generic(hw,
2875 E1000_KMRNCTRLSTA_HD_CTRL,
2876 data);
2877 if (ret_val)
2878 return ret_val;
2879
2880 /* Enable jumbo frame workaround in the PHY */
2881 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2882 data &= ~(0x7F << 5);
2883 data |= (0x37 << 5);
2884 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2885 if (ret_val)
2886 return ret_val;
2887 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2888 data &= ~(1 << 13);
2889 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2890 if (ret_val)
2891 return ret_val;
2892 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2893 data &= ~(0x3FF << 2);
2894 data |= (E1000_TX_PTR_GAP << 2);
2895 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2896 if (ret_val)
2897 return ret_val;
2898 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2899 if (ret_val)
2900 return ret_val;
2901 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2902 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2903 (1 << 10));
2904 if (ret_val)
2905 return ret_val;
2906 } else {
2907 /* Write MAC register values back to h/w defaults */
2908 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2909 mac_reg &= ~(0xF << 14);
2910 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2911
2912 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2913 mac_reg &= ~E1000_RCTL_SECRC;
2914 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2915
2916 ret_val = e1000_read_kmrn_reg_generic(hw,
2917 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2918 &data);
2919 if (ret_val)
2920 return ret_val;
2921 ret_val = e1000_write_kmrn_reg_generic(hw,
2922 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2923 data & ~(1 << 0));
2924 if (ret_val)
2925 return ret_val;
2926 ret_val = e1000_read_kmrn_reg_generic(hw,
2927 E1000_KMRNCTRLSTA_HD_CTRL,
2928 &data);
2929 if (ret_val)
2930 return ret_val;
2931 data &= ~(0xF << 8);
2932 data |= (0xB << 8);
2933 ret_val = e1000_write_kmrn_reg_generic(hw,
2934 E1000_KMRNCTRLSTA_HD_CTRL,
2935 data);
2936 if (ret_val)
2937 return ret_val;
2938
2939 /* Write PHY register values back to h/w defaults */
2940 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2941 data &= ~(0x7F << 5);
2942 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2943 if (ret_val)
2944 return ret_val;
2945 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2946 data |= (1 << 13);
2947 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2948 if (ret_val)
2949 return ret_val;
2950 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2951 data &= ~(0x3FF << 2);
2952 data |= (0x8 << 2);
2953 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2954 if (ret_val)
2955 return ret_val;
2956 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2957 if (ret_val)
2958 return ret_val;
2959 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2960 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2961 ~(1 << 10));
2962 if (ret_val)
2963 return ret_val;
2964 }
2965
2966 /* re-enable Rx path after enabling/disabling workaround */
2967 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2968 ~(1 << 14));
2969 }
2970
2971 /**
2972 * e1000_lv_phy_workarounds_ich8lan - PHY workarounds done after every reset
2973 * @hw: pointer to the HW structure
2974 **/
2975 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2976 {
2977 s32 ret_val = E1000_SUCCESS;
2978
2979 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2980
2981 if (hw->mac.type != e1000_pch2lan)
2982 return E1000_SUCCESS;
2983
2984 /* Set MDIO slow mode before any other MDIO access */
2985 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2986 if (ret_val)
2987 return ret_val;
2988
2989 ret_val = hw->phy.ops.acquire(hw);
2990 if (ret_val)
2991 return ret_val;
2992 /* set MSE higher to enable link to stay up when noise is high */
2993 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2994 if (ret_val)
2995 goto release;
2996 /* drop the link after the MSE threshold has been reached 5 times */
2997 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2998 release:
2999 hw->phy.ops.release(hw);
3000
3001 return ret_val;
3002 }
3003
3004 /**
3005 * e1000_k1_workaround_lv - K1 Si workaround
3006 * @hw: pointer to the HW structure
3007 *
3008 * Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
3009 * Disable K1 for 1000 and 100 speeds.
3010 **/
3011 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
3012 {
3013 s32 ret_val = E1000_SUCCESS;
3014 u16 status_reg = 0;
3015
3016 DEBUGFUNC("e1000_k1_workaround_lv");
3017
3018 if (hw->mac.type != e1000_pch2lan)
3019 return E1000_SUCCESS;
3020
3021 /* Set K1 beacon duration based on 10Mbps speed */
3022 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
3023 if (ret_val)
3024 return ret_val;
3025
3026 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
3027 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
3028 if (status_reg &
3029 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
3030 u16 pm_phy_reg;
3031
3032 /* LV 1G/100 Packet drop issue wa */
3033 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
3034 &pm_phy_reg);
3035 if (ret_val)
3036 return ret_val;
3037 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
3038 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
3039 pm_phy_reg);
3040 if (ret_val)
3041 return ret_val;
3042 } else {
3043 u32 mac_reg;
3044 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3045 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3046 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3047 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3048 }
3049 }
3050
3051 return ret_val;
3052 }
3053
3054 /**
3055 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3056 * @hw: pointer to the HW structure
3057 * @gate: boolean set to TRUE to gate, FALSE to ungate
3058 *
3059 * Gate/ungate the automatic PHY configuration via hardware; perform
3060 * the configuration via software instead.
3061 **/
3062 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3063 {
3064 u32 extcnf_ctrl;
3065
3066 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3067
3068 if (hw->mac.type < e1000_pch2lan)
3069 return;
3070
3071 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3072
3073 if (gate)
3074 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3075 else
3076 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3077
3078 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3079 }
3080
3081 /**
3082 * e1000_lan_init_done_ich8lan - Check for PHY config completion
3083 * @hw: pointer to the HW structure
3084 *
3085 * Check the appropriate indication that the MAC has finished configuring the
3086 * PHY after a software reset.
3087 **/
3088 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3089 {
3090 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3091
3092 DEBUGFUNC("e1000_lan_init_done_ich8lan");
3093
3094 /* Wait for basic configuration to complete before proceeding */
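/* STATUS.LAN_INIT_DONE is sampled every 100 usec, for at most
 * E1000_ICH8_LAN_INIT_TIMEOUT iterations.
 */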
3095 do {
3096 data = E1000_READ_REG(hw, E1000_STATUS);
3097 data &= E1000_STATUS_LAN_INIT_DONE;
3098 usec_delay(100);
3099 } while ((!data) && --loop);
3100
3101 /* If basic configuration did not complete before the loop
3102 * count reached 0, loading the configuration from NVM will
3103 * leave the PHY in a bad state, possibly resulting in no link.
3104 */
3105 if (loop == 0)
3106 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3107
3108 /* Clear the Init Done bit for the next init event */
3109 data = E1000_READ_REG(hw, E1000_STATUS);
3110 data &= ~E1000_STATUS_LAN_INIT_DONE;
3111 E1000_WRITE_REG(hw, E1000_STATUS, data);
3112 }
3113
3114 /**
3115 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3116 * @hw: pointer to the HW structure
3117 **/
3118 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3119 {
3120 s32 ret_val = E1000_SUCCESS;
3121 u16 reg;
3122
3123 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3124
3125 if (hw->phy.ops.check_reset_block(hw))
3126 return E1000_SUCCESS;
3127
3128 /* Allow time for h/w to get to quiescent state after reset */
3129 msec_delay(10);
3130
3131 /* Perform any necessary post-reset workarounds */
3132 switch (hw->mac.type) {
3133 case e1000_pchlan:
3134 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3135 if (ret_val)
3136 return ret_val;
3137 break;
3138 case e1000_pch2lan:
3139 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3140 if (ret_val)
3141 return ret_val;
3142 break;
3143 default:
3144 break;
3145 }
3146
3147 /* Clear the host wakeup bit after lcd reset */
3148 if (hw->mac.type >= e1000_pchlan) {
3149 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3150 reg &= ~BM_WUC_HOST_WU_BIT;
3151 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3152 }
3153
3154 /* Configure the LCD with the extended configuration region in NVM */
3155 ret_val = e1000_sw_lcd_config_ich8lan(hw);
3156 if (ret_val)
3157 return ret_val;
3158
3159 /* Configure the LCD with the OEM bits in NVM */
3160 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3161
3162 if (hw->mac.type == e1000_pch2lan) {
3163 /* Ungate automatic PHY configuration on non-managed 82579 */
3164 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3165 E1000_ICH_FWSM_FW_VALID)) {
3166 msec_delay(10);
3167 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3168 }
3169
3170 /* Set EEE LPI Update Timer to 200usec */
3171 ret_val = hw->phy.ops.acquire(hw);
3172 if (ret_val)
3173 return ret_val;
3174 ret_val = e1000_write_emi_reg_locked(hw,
3175 I82579_LPI_UPDATE_TIMER,
3176 0x1387);
3177 hw->phy.ops.release(hw);
3178 }
3179
3180 return ret_val;
3181 }
3182
3183 /**
3184 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3185 * @hw: pointer to the HW structure
3186 *
3187 * Resets the PHY
3188 * This is a function pointer entry point called by drivers
3189 * or other shared routines.
3190 **/
3191 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3192 {
3193 s32 ret_val = E1000_SUCCESS;
3194
3195 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3196
3197 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3198 if ((hw->mac.type == e1000_pch2lan) &&
3199 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3200 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3201
3202 ret_val = e1000_phy_hw_reset_generic(hw);
3203 if (ret_val)
3204 return ret_val;
3205
3206 return e1000_post_phy_reset_ich8lan(hw);
3207 }
3208
3209 /**
3210 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3211 * @hw: pointer to the HW structure
3212 * @active: TRUE to enable LPLU, FALSE to disable
3213 *
3214 * Sets the LPLU state according to the active flag. For PCH, if the OEM
3215 * write bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
3216 * the phy speed. This function will manually set the LPLU bit and restart
3217 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
3218 * since it configures the same bit.
3219 **/
3220 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3221 {
3222 s32 ret_val;
3223 u16 oem_reg;
3224
3225 DEBUGFUNC("e1000_set_lplu_state_pchlan");
3226 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3227 if (ret_val)
3228 return ret_val;
3229
3230 if (active)
3231 oem_reg |= HV_OEM_BITS_LPLU;
3232 else
3233 oem_reg &= ~HV_OEM_BITS_LPLU;
3234
3235 if (!hw->phy.ops.check_reset_block(hw))
3236 oem_reg |= HV_OEM_BITS_RESTART_AN;
3237
3238 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3239 }
3240
3241 /**
3242 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3243 * @hw: pointer to the HW structure
3244 * @active: TRUE to enable LPLU, FALSE to disable
3245 *
3246 * Sets the LPLU D0 state according to the active flag. When
3247 * activating LPLU this function also disables smart speed
3248 * and vice versa. LPLU will not be activated unless the
3249 * device autonegotiation advertisement meets standards of
3250 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3251 * This is a function pointer entry point only called by
3252 * PHY setup routines.
3253 **/
3254 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3255 {
3256 struct e1000_phy_info *phy = &hw->phy;
3257 u32 phy_ctrl;
3258 s32 ret_val = E1000_SUCCESS;
3259 u16 data;
3260
3261 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3262
3263 if (phy->type == e1000_phy_ife)
3264 return E1000_SUCCESS;
3265
3266 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3267
3268 if (active) {
3269 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3270 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3271
3272 if (phy->type != e1000_phy_igp_3)
3273 return E1000_SUCCESS;
3274
3275 /* Call gig speed drop workaround on LPLU before accessing
3276 * any PHY registers
3277 */
3278 if (hw->mac.type == e1000_ich8lan)
3279 e1000_gig_downshift_workaround_ich8lan(hw);
3280
3281 /* When LPLU is enabled, we should disable SmartSpeed */
3282 ret_val = phy->ops.read_reg(hw,
3283 IGP01E1000_PHY_PORT_CONFIG,
3284 &data);
3285 if (ret_val)
3286 return ret_val;
3287 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3288 ret_val = phy->ops.write_reg(hw,
3289 IGP01E1000_PHY_PORT_CONFIG,
3290 data);
3291 if (ret_val)
3292 return ret_val;
3293 } else {
3294 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3295 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3296
3297 if (phy->type != e1000_phy_igp_3)
3298 return E1000_SUCCESS;
3299
3300 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3301 * during Dx states where the power conservation is most
3302 * important. During driver activity we should enable
3303 * SmartSpeed, so performance is maintained.
3304 */
3305 if (phy->smart_speed == e1000_smart_speed_on) {
3306 ret_val = phy->ops.read_reg(hw,
3307 IGP01E1000_PHY_PORT_CONFIG,
3308 &data);
3309 if (ret_val)
3310 return ret_val;
3311
3312 data |= IGP01E1000_PSCFR_SMART_SPEED;
3313 ret_val = phy->ops.write_reg(hw,
3314 IGP01E1000_PHY_PORT_CONFIG,
3315 data);
3316 if (ret_val)
3317 return ret_val;
3318 } else if (phy->smart_speed == e1000_smart_speed_off) {
3319 ret_val = phy->ops.read_reg(hw,
3320 IGP01E1000_PHY_PORT_CONFIG,
3321 &data);
3322 if (ret_val)
3323 return ret_val;
3324
3325 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3326 ret_val = phy->ops.write_reg(hw,
3327 IGP01E1000_PHY_PORT_CONFIG,
3328 data);
3329 if (ret_val)
3330 return ret_val;
3331 }
3332 }
3333
3334 return E1000_SUCCESS;
3335 }
3336
3337 /**
3338 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3339 * @hw: pointer to the HW structure
3340 * @active: TRUE to enable LPLU, FALSE to disable
3341 *
3342 * Sets the LPLU D3 state according to the active flag. When
3343 * activating LPLU this function also disables smart speed
3344 * and vice versa. LPLU will not be activated unless the
3345 * device autonegotiation advertisement meets standards of
3346 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3347 * This is a function pointer entry point only called by
3348 * PHY setup routines.
3349 **/
3350 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3351 {
3352 struct e1000_phy_info *phy = &hw->phy;
3353 u32 phy_ctrl;
3354 s32 ret_val = E1000_SUCCESS;
3355 u16 data;
3356
3357 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3358
3359 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3360
3361 if (!active) {
3362 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3363 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3364
3365 if (phy->type != e1000_phy_igp_3)
3366 return E1000_SUCCESS;
3367
3368 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3369 * during Dx states where the power conservation is most
3370 * important. During driver activity we should enable
3371 * SmartSpeed, so performance is maintained.
3372 */
3373 if (phy->smart_speed == e1000_smart_speed_on) {
3374 ret_val = phy->ops.read_reg(hw,
3375 IGP01E1000_PHY_PORT_CONFIG,
3376 &data);
3377 if (ret_val)
3378 return ret_val;
3379
3380 data |= IGP01E1000_PSCFR_SMART_SPEED;
3381 ret_val = phy->ops.write_reg(hw,
3382 IGP01E1000_PHY_PORT_CONFIG,
3383 data);
3384 if (ret_val)
3385 return ret_val;
3386 } else if (phy->smart_speed == e1000_smart_speed_off) {
3387 ret_val = phy->ops.read_reg(hw,
3388 IGP01E1000_PHY_PORT_CONFIG,
3389 &data);
3390 if (ret_val)
3391 return ret_val;
3392
3393 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3394 ret_val = phy->ops.write_reg(hw,
3395 IGP01E1000_PHY_PORT_CONFIG,
3396 data);
3397 if (ret_val)
3398 return ret_val;
3399 }
3400 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3401 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3402 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3403 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3404 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3405
3406 if (phy->type != e1000_phy_igp_3)
3407 return E1000_SUCCESS;
3408
3409 /* Call gig speed drop workaround on LPLU before accessing
3410 * any PHY registers
3411 */
3412 if (hw->mac.type == e1000_ich8lan)
3413 e1000_gig_downshift_workaround_ich8lan(hw);
3414
3415 /* When LPLU is enabled, we should disable SmartSpeed */
3416 ret_val = phy->ops.read_reg(hw,
3417 IGP01E1000_PHY_PORT_CONFIG,
3418 &data);
3419 if (ret_val)
3420 return ret_val;
3421
3422 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3423 ret_val = phy->ops.write_reg(hw,
3424 IGP01E1000_PHY_PORT_CONFIG,
3425 data);
3426 }
3427
3428 return ret_val;
3429 }
3430
3431 /**
3432 * e1000_valid_nvm_bank_detect_ich8lan - find the valid NVM bank (0 or 1)
3433 * @hw: pointer to the HW structure
3434 * @bank: pointer to the variable that returns the active bank
3435 *
3436 * Reads signature byte from the NVM using the flash access registers.
3437 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3438 **/
3439 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3440 {
3441 u32 eecd;
3442 struct e1000_nvm_info *nvm = &hw->nvm;
3443 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3444 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3445 u32 nvm_dword = 0;
3446 u8 sig_byte = 0;
3447 s32 ret_val;
3448
3449 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3450
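/* The valid-bank signature lives in bits 15:14 of NVM word 0x13
 * (E1000_ICH_NVM_SIG_WORD). Byte-accessible flash reads that word's
 * upper byte directly, hence the "* 2 + 1" byte offset above; SPT
 * and later read a whole dword and extract the byte below.
 */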
3451 switch (hw->mac.type) {
3452 case e1000_pch_spt:
3453 case e1000_pch_cnp:
3454 case e1000_pch_tgp:
3455 case e1000_pch_adp:
3456 case e1000_pch_mtp:
3457 case e1000_pch_lnp:
3458 case e1000_pch_rpl:
3459 case e1000_pch_arl:
3460 case e1000_pch_ptp:
3461 case e1000_pch_nvl:
3462 bank1_offset = nvm->flash_bank_size;
3463 act_offset = E1000_ICH_NVM_SIG_WORD;
3464
3465 /* set bank to 0 in case flash read fails */
3466 *bank = 0;
3467
3468 /* Check bank 0 */
3469 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3470 &nvm_dword);
3471 if (ret_val)
3472 return ret_val;
3473 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3474 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3475 E1000_ICH_NVM_SIG_VALUE) {
3476 *bank = 0;
3477 return E1000_SUCCESS;
3478 }
3479
3480 /* Check bank 1 */
3481 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3482 bank1_offset,
3483 &nvm_dword);
3484 if (ret_val)
3485 return ret_val;
3486 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3487 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3488 E1000_ICH_NVM_SIG_VALUE) {
3489 *bank = 1;
3490 return E1000_SUCCESS;
3491 }
3492
3493 DEBUGOUT("ERROR: No valid NVM bank present\n");
3494 return -E1000_ERR_NVM;
3495 case e1000_ich8lan:
3496 case e1000_ich9lan:
3497 eecd = E1000_READ_REG(hw, E1000_EECD);
3498 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3499 E1000_EECD_SEC1VAL_VALID_MASK) {
3500 if (eecd & E1000_EECD_SEC1VAL)
3501 *bank = 1;
3502 else
3503 *bank = 0;
3504
3505 return E1000_SUCCESS;
3506 }
3507 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3508 		/* FALLTHROUGH */
3509 default:
3510 /* set bank to 0 in case flash read fails */
3511 *bank = 0;
3512
3513 /* Check bank 0 */
3514 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3515 &sig_byte);
3516 if (ret_val)
3517 return ret_val;
3518 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3519 E1000_ICH_NVM_SIG_VALUE) {
3520 *bank = 0;
3521 return E1000_SUCCESS;
3522 }
3523
3524 /* Check bank 1 */
3525 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3526 bank1_offset,
3527 &sig_byte);
3528 if (ret_val)
3529 return ret_val;
3530 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3531 E1000_ICH_NVM_SIG_VALUE) {
3532 *bank = 1;
3533 return E1000_SUCCESS;
3534 }
3535
3536 DEBUGOUT("ERROR: No valid NVM bank present\n");
3537 return -E1000_ERR_NVM;
3538 }
3539 }
3540
3541 /**
3542 * e1000_read_nvm_spt - NVM access for SPT
3543 * @hw: pointer to the HW structure
3544  * @offset: The offset (in words) of the word(s) to read.
3545 * @words: Size of data to read in words.
3546 * @data: pointer to the word(s) to read at offset.
3547 *
3548 * Reads a word(s) from the NVM
3549 **/
3550 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3551 u16 *data)
3552 {
3553 struct e1000_nvm_info *nvm = &hw->nvm;
3554 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3555 u32 act_offset;
3556 s32 ret_val = E1000_SUCCESS;
3557 u32 bank = 0;
3558 u32 dword = 0;
3559 u16 offset_to_read;
3560 u16 i;
3561
3562 DEBUGFUNC("e1000_read_nvm_spt");
3563
3564 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3565 (words == 0)) {
3566 DEBUGOUT("nvm parameter(s) out of bounds\n");
3567 ret_val = -E1000_ERR_NVM;
3568 goto out;
3569 }
3570
3571 nvm->ops.acquire(hw);
3572
3573 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3574 if (ret_val != E1000_SUCCESS) {
3575 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3576 bank = 0;
3577 }
3578
3579 act_offset = (bank) ? nvm->flash_bank_size : 0;
3580 act_offset += offset;
3581
3582 ret_val = E1000_SUCCESS;
3583
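	/* SPT-class parts only support 32-bit flash reads, so words are
	 * fetched in aligned dword pairs: the low 16 bits of the dword
	 * supply data[i] and the high 16 bits data[i+1]. A trailing odd
	 * word reads the aligned dword containing it and keeps the half
	 * selected by (act_offset + i) % 2.
	 */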
3584 for (i = 0; i < words; i += 2) {
3585 if (words - i == 1) {
3586 if (dev_spec->shadow_ram[offset+i].modified) {
3587 data[i] = dev_spec->shadow_ram[offset+i].value;
3588 } else {
3589 offset_to_read = act_offset + i -
3590 ((act_offset + i) % 2);
3591 ret_val =
3592 e1000_read_flash_dword_ich8lan(hw,
3593 offset_to_read,
3594 &dword);
3595 if (ret_val)
3596 break;
3597 if ((act_offset + i) % 2 == 0)
3598 data[i] = (u16)(dword & 0xFFFF);
3599 else
3600 data[i] = (u16)((dword >> 16) & 0xFFFF);
3601 }
3602 } else {
3603 offset_to_read = act_offset + i;
3604 if (!(dev_spec->shadow_ram[offset+i].modified) ||
3605 !(dev_spec->shadow_ram[offset+i+1].modified)) {
3606 ret_val =
3607 e1000_read_flash_dword_ich8lan(hw,
3608 offset_to_read,
3609 &dword);
3610 if (ret_val)
3611 break;
3612 }
3613 if (dev_spec->shadow_ram[offset+i].modified)
3614 data[i] = dev_spec->shadow_ram[offset+i].value;
3615 else
3616 data[i] = (u16) (dword & 0xFFFF);
3617 			if (dev_spec->shadow_ram[offset+i+1].modified)
3618 				data[i+1] =
3619 					dev_spec->shadow_ram[offset+i+1].value;
3620 			else
3621 				data[i+1] = (u16)((dword >> 16) & 0xFFFF);
3622 }
3623 }
3624
3625 nvm->ops.release(hw);
3626
3627 out:
3628 if (ret_val)
3629 DEBUGOUT1("NVM read error: %d\n", ret_val);
3630
3631 return ret_val;
3632 }
3633
3634 /**
3635 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3636 * @hw: pointer to the HW structure
3637  * @offset: The offset (in words) of the word(s) to read.
3638 * @words: Size of data to read in words
3639 * @data: Pointer to the word(s) to read at offset.
3640 *
3641 * Reads a word(s) from the NVM using the flash access registers.
3642 **/
3643 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3644 u16 *data)
3645 {
3646 struct e1000_nvm_info *nvm = &hw->nvm;
3647 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3648 u32 act_offset;
3649 s32 ret_val = E1000_SUCCESS;
3650 u32 bank = 0;
3651 u16 i, word;
3652
3653 DEBUGFUNC("e1000_read_nvm_ich8lan");
3654
3655 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3656 (words == 0)) {
3657 DEBUGOUT("nvm parameter(s) out of bounds\n");
3658 ret_val = -E1000_ERR_NVM;
3659 goto out;
3660 }
3661
3662 nvm->ops.acquire(hw);
3663
3664 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3665 if (ret_val != E1000_SUCCESS) {
3666 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3667 bank = 0;
3668 }
3669
3670 act_offset = (bank) ? nvm->flash_bank_size : 0;
3671 act_offset += offset;
3672
3673 ret_val = E1000_SUCCESS;
3674 for (i = 0; i < words; i++) {
3675 if (dev_spec->shadow_ram[offset+i].modified) {
3676 data[i] = dev_spec->shadow_ram[offset+i].value;
3677 } else {
3678 ret_val = e1000_read_flash_word_ich8lan(hw,
3679 act_offset + i,
3680 &word);
3681 if (ret_val)
3682 break;
3683 data[i] = word;
3684 }
3685 }
3686
3687 nvm->ops.release(hw);
3688
3689 out:
3690 if (ret_val)
3691 DEBUGOUT1("NVM read error: %d\n", ret_val);
3692
3693 return ret_val;
3694 }
3695
3696 /**
3697 * e1000_flash_cycle_init_ich8lan - Initialize flash
3698 * @hw: pointer to the HW structure
3699 *
3700 * This function does initial flash setup so that a new read/write/erase cycle
3701 * can be started.
3702 **/
3703 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3704 {
3705 union ich8_hws_flash_status hsfsts;
3706 s32 ret_val = -E1000_ERR_NVM;
3707
3708 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3709
3710 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3711
3712 /* Check if the flash descriptor is valid */
3713 if (!hsfsts.hsf_status.fldesvalid) {
3714 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3715 return -E1000_ERR_NVM;
3716 }
3717
3718 /* Clear FCERR and DAEL in hw status by writing 1 */
3719 hsfsts.hsf_status.flcerr = 1;
3720 hsfsts.hsf_status.dael = 1;
3721 if (hw->mac.type >= e1000_pch_spt)
3722 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3723 hsfsts.regval & 0xFFFF);
3724 else
3725 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3726
3727 	/* Either the hardware should provide a SPI cycle-in-progress
3728 	 * bit that we can check before starting a new cycle, or the
3729 	 * FDONE bit should be changed in the hardware so that it
3730 	 * reads as 1 after a hardware reset, which can then be used
3731 	 * as an indication of whether a cycle is in progress or has
3732 	 * completed.
3733 	 */
3734
3735 if (!hsfsts.hsf_status.flcinprog) {
3736 /* There is no cycle running at present,
3737 * so we can start a cycle.
3738 * Begin by setting Flash Cycle Done.
3739 */
3740 hsfsts.hsf_status.flcdone = 1;
3741 if (hw->mac.type >= e1000_pch_spt)
3742 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3743 hsfsts.regval & 0xFFFF);
3744 else
3745 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3746 hsfsts.regval);
3747 ret_val = E1000_SUCCESS;
3748 } else {
3749 s32 i;
3750
3751 		/* Otherwise poll for some time so the current
3752 		 * cycle has a chance to end before giving up.
3753 		 */
3754 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3755 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3756 ICH_FLASH_HSFSTS);
3757 if (!hsfsts.hsf_status.flcinprog) {
3758 ret_val = E1000_SUCCESS;
3759 break;
3760 }
3761 usec_delay(1);
3762 }
3763 		if (ret_val == E1000_SUCCESS) {
3764 			/* The previous cycle has completed; now set
3765 			 * the Flash Cycle Done.
3766 			 */
3767 hsfsts.hsf_status.flcdone = 1;
3768 if (hw->mac.type >= e1000_pch_spt)
3769 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3770 hsfsts.regval & 0xFFFF);
3771 else
3772 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3773 hsfsts.regval);
3774 } else {
3775 DEBUGOUT("Flash controller busy, cannot get access\n");
3776 }
3777 }
3778
3779 return ret_val;
3780 }
3781
3782 /**
3783 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3784 * @hw: pointer to the HW structure
3785 * @timeout: maximum time to wait for completion
3786 *
3787 * This function starts a flash cycle and waits for its completion.
3788 **/
3789 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3790 {
3791 union ich8_hws_flash_ctrl hsflctl;
3792 union ich8_hws_flash_status hsfsts;
3793 u32 i = 0;
3794
3795 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3796
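	/* On SPT and newer, the 16-bit HSFSTS and HSFCTL registers appear
	 * as one 32-bit register in LAN memory space (HSFSTS in the low
	 * word, HSFCTL in the high word), hence the 16-bit shifts around
	 * the reads and writes below.
	 */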
3797 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3798 if (hw->mac.type >= e1000_pch_spt)
3799 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3800 else
3801 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3802 hsflctl.hsf_ctrl.flcgo = 1;
3803
3804 if (hw->mac.type >= e1000_pch_spt)
3805 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3806 hsflctl.regval << 16);
3807 else
3808 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3809
3810 /* wait till FDONE bit is set to 1 */
3811 do {
3812 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3813 if (hsfsts.hsf_status.flcdone)
3814 break;
3815 usec_delay(1);
3816 } while (i++ < timeout);
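	/* Each iteration above delays 1 usec, so the timeout argument is
	 * roughly the maximum wait in microseconds (ignoring the latency
	 * of the register read itself).
	 */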
3817
3818 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3819 return E1000_SUCCESS;
3820
3821 return -E1000_ERR_NVM;
3822 }
3823
3824 /**
3825 * e1000_read_flash_dword_ich8lan - Read dword from flash
3826 * @hw: pointer to the HW structure
3827 * @offset: offset to data location
3828 * @data: pointer to the location for storing the data
3829 *
3830 * Reads the flash dword at offset into data. Offset is converted
3831 * to bytes before read.
3832 **/
3833 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3834 u32 *data)
3835 {
3836 DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3837
3838 if (!data)
3839 return -E1000_ERR_NVM;
3840
3841 /* Must convert word offset into bytes. */
3842 offset <<= 1;
3843
3844 return e1000_read_flash_data32_ich8lan(hw, offset, data);
3845 }
3846
3847 /**
3848 * e1000_read_flash_word_ich8lan - Read word from flash
3849 * @hw: pointer to the HW structure
3850 * @offset: offset to data location
3851 * @data: pointer to the location for storing the data
3852 *
3853 * Reads the flash word at offset into data. Offset is converted
3854 * to bytes before read.
3855 **/
3856 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3857 u16 *data)
3858 {
3859 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3860
3861 if (!data)
3862 return -E1000_ERR_NVM;
3863
3864 /* Must convert offset into bytes. */
3865 offset <<= 1;
3866
3867 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3868 }
3869
3870 /**
3871 * e1000_read_flash_byte_ich8lan - Read byte from flash
3872 * @hw: pointer to the HW structure
3873 * @offset: The offset of the byte to read.
3874 * @data: Pointer to a byte to store the value read.
3875 *
3876 * Reads a single byte from the NVM using the flash access registers.
3877 **/
3878 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3879 u8 *data)
3880 {
3881 s32 ret_val;
3882 u16 word = 0;
3883
3884 	/* On SPT and newer, only 32-bit access is supported,
3885 	 * so this function should not be called.
3886 	 */
3887 if (hw->mac.type >= e1000_pch_spt)
3888 return -E1000_ERR_NVM;
3889 else
3890 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3891
3892 if (ret_val)
3893 return ret_val;
3894
3895 *data = (u8)word;
3896
3897 return E1000_SUCCESS;
3898 }
3899
3900 /**
3901 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3902 * @hw: pointer to the HW structure
3903 * @offset: The offset (in bytes) of the byte or word to read.
3904 * @size: Size of data to read, 1=byte 2=word
3905 * @data: Pointer to the word to store the value read.
3906 *
3907 * Reads a byte or word from the NVM using the flash access registers.
3908 **/
3909 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3910 u8 size, u16 *data)
3911 {
3912 union ich8_hws_flash_status hsfsts;
3913 union ich8_hws_flash_ctrl hsflctl;
3914 u32 flash_linear_addr;
3915 u32 flash_data = 0;
3916 s32 ret_val = -E1000_ERR_NVM;
3917 u8 count = 0;
3918
3919 DEBUGFUNC("e1000_read_flash_data_ich8lan");
3920
3921 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3922 return -E1000_ERR_NVM;
3923 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3924 hw->nvm.flash_base_addr);
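	/* Worked example (illustrative values only): with a flash_base_addr
	 * of 0x1000 and a byte offset of 0x27, the cycle targets linear
	 * address 0x1000 + (0x27 & ICH_FLASH_LINEAR_ADDR_MASK) = 0x1027.
	 */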
3925
3926 do {
3927 usec_delay(1);
3928 /* Steps */
3929 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3930 if (ret_val != E1000_SUCCESS)
3931 break;
3932 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3933
3934 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3935 hsflctl.hsf_ctrl.fldbcount = size - 1;
3936 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3937 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3938 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3939
3940 ret_val = e1000_flash_cycle_ich8lan(hw,
3941 ICH_FLASH_READ_COMMAND_TIMEOUT);
3942
3943 		/* Check if FCERR is set to 1; if so, clear it and try
3944 		 * the whole sequence a few more times. Otherwise read
3945 		 * the data out of the Flash Data0 register, least
3946 		 * significant byte first.
3947 		 */
3948 if (ret_val == E1000_SUCCESS) {
3949 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3950 if (size == 1)
3951 *data = (u8)(flash_data & 0x000000FF);
3952 else if (size == 2)
3953 *data = (u16)(flash_data & 0x0000FFFF);
3954 break;
3955 } else {
3956 /* If we've gotten here, then things are probably
3957 * completely hosed, but if the error condition is
3958 * detected, it won't hurt to give it another try...
3959 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3960 */
3961 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3962 ICH_FLASH_HSFSTS);
3963 if (hsfsts.hsf_status.flcerr) {
3964 /* Repeat for some time before giving up. */
3965 continue;
3966 } else if (!hsfsts.hsf_status.flcdone) {
3967 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3968 break;
3969 }
3970 }
3971 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3972
3973 return ret_val;
3974 }
3975
3976 /**
3977 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3978 * @hw: pointer to the HW structure
3979 * @offset: The offset (in bytes) of the dword to read.
3980 * @data: Pointer to the dword to store the value read.
3981 *
3982  * Reads a dword from the NVM using the flash access registers.
3983 **/
3984 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3985 u32 *data)
3986 {
3987 union ich8_hws_flash_status hsfsts;
3988 union ich8_hws_flash_ctrl hsflctl;
3989 u32 flash_linear_addr;
3990 s32 ret_val = -E1000_ERR_NVM;
3991 u8 count = 0;
3992
3993 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3994
3995 if (offset > ICH_FLASH_LINEAR_ADDR_MASK && hw->mac.type < e1000_pch_spt)
3996 return -E1000_ERR_NVM;
3997 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3998 hw->nvm.flash_base_addr);
3999
4000 do {
4001 usec_delay(1);
4002 /* Steps */
4003 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4004 if (ret_val != E1000_SUCCESS)
4005 break;
4006 		/* In SPT, this register is in LAN memory space, not flash.
4007 		 * Therefore, only 32-bit access is supported.
4008 		 */
4009 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4010
4011 		/* A fldbcount of sizeof(u32) - 1 selects a 4-byte transfer. */
4012 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4013 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
4014 		/* In SPT, this register is in LAN memory space, not flash.
4015 		 * Therefore, only 32-bit access is supported.
4016 		 */
4017 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4018 (u32)hsflctl.regval << 16);
4019 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4020
4021 ret_val = e1000_flash_cycle_ich8lan(hw,
4022 ICH_FLASH_READ_COMMAND_TIMEOUT);
4023
4024 		/* Check if FCERR is set to 1; if so, clear it and try
4025 		 * the whole sequence a few more times. Otherwise read
4026 		 * the data out of the Flash Data0 register, least
4027 		 * significant byte first.
4028 		 */
4029 if (ret_val == E1000_SUCCESS) {
4030 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
4031 break;
4032 } else {
4033 /* If we've gotten here, then things are probably
4034 * completely hosed, but if the error condition is
4035 * detected, it won't hurt to give it another try...
4036 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
4037 */
4038 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4039 ICH_FLASH_HSFSTS);
4040 if (hsfsts.hsf_status.flcerr) {
4041 /* Repeat for some time before giving up. */
4042 continue;
4043 } else if (!hsfsts.hsf_status.flcdone) {
4044 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4045 break;
4046 }
4047 }
4048 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4049
4050 return ret_val;
4051 }
4052
4053 /**
4054 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
4055 * @hw: pointer to the HW structure
4056  * @offset: The offset (in words) of the word(s) to write.
4057 * @words: Size of data to write in words
4058 * @data: Pointer to the word(s) to write at offset.
4059 *
4060  * Writes word(s) to the shadow RAM, committed on the next NVM update.
4061 **/
4062 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4063 u16 *data)
4064 {
4065 struct e1000_nvm_info *nvm = &hw->nvm;
4066 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4067 u16 i;
4068
4069 DEBUGFUNC("e1000_write_nvm_ich8lan");
4070
4071 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4072 (words == 0)) {
4073 DEBUGOUT("nvm parameter(s) out of bounds\n");
4074 return -E1000_ERR_NVM;
4075 }
4076
4077 nvm->ops.acquire(hw);
4078
4079 for (i = 0; i < words; i++) {
4080 dev_spec->shadow_ram[offset+i].modified = TRUE;
4081 dev_spec->shadow_ram[offset+i].value = data[i];
4082 }
4083
4084 nvm->ops.release(hw);
4085
4086 return E1000_SUCCESS;
4087 }
4088
4089 /**
4090 * e1000_update_nvm_checksum_spt - Update the checksum for NVM
4091 * @hw: pointer to the HW structure
4092 *
4093 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4094 * which writes the checksum to the shadow ram. The changes in the shadow
4095 * ram are then committed to the EEPROM by processing each bank at a time
4096 * checking for the modified bit and writing only the pending changes.
4097 * After a successful commit, the shadow ram is cleared and is ready for
4098 * future writes.
4099 **/
4100 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4101 {
4102 struct e1000_nvm_info *nvm = &hw->nvm;
4103 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4104 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4105 s32 ret_val;
4106 u32 dword = 0;
4107
4108 DEBUGFUNC("e1000_update_nvm_checksum_spt");
4109
4110 ret_val = e1000_update_nvm_checksum_generic(hw);
4111 if (ret_val)
4112 goto out;
4113
4114 if (nvm->type != e1000_nvm_flash_sw)
4115 goto out;
4116
4117 nvm->ops.acquire(hw);
4118
4119 /* We're writing to the opposite bank so if we're on bank 1,
4120 * write to bank 0 etc. We also need to erase the segment that
4121 * is going to be written
4122 */
4123 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4124 if (ret_val != E1000_SUCCESS) {
4125 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4126 bank = 0;
4127 }
4128
4129 if (bank == 0) {
4130 new_bank_offset = nvm->flash_bank_size;
4131 old_bank_offset = 0;
4132 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4133 if (ret_val)
4134 goto release;
4135 } else {
4136 old_bank_offset = nvm->flash_bank_size;
4137 new_bank_offset = 0;
4138 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4139 if (ret_val)
4140 goto release;
4141 }
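	/* The commit is effectively a ping-pong scheme: the inactive bank
	 * is erased and rewritten while the active bank keeps its valid
	 * signature, so an interrupted commit should still leave one
	 * readable bank.
	 */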
4142 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4143 /* Determine whether to write the value stored
4144 * in the other NVM bank or a modified value stored
4145 * in the shadow RAM
4146 */
4147 ret_val = e1000_read_flash_dword_ich8lan(hw,
4148 i + old_bank_offset,
4149 &dword);
4150
4151 if (dev_spec->shadow_ram[i].modified) {
4152 dword &= 0xffff0000;
4153 dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4154 }
4155 if (dev_spec->shadow_ram[i + 1].modified) {
4156 dword &= 0x0000ffff;
4157 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4158 << 16);
4159 }
4160 if (ret_val)
4161 break;
4162
4163 /* If the word is 0x13, then make sure the signature bits
4164 * (15:14) are 11b until the commit has completed.
4165 * This will allow us to write 10b which indicates the
4166 * signature is valid. We want to do this after the write
4167 * has completed so that we don't mark the segment valid
4168 * while the write is still in progress
4169 */
4170 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4171 dword |= E1000_ICH_NVM_SIG_MASK << 16;
4172
4176 		usec_delay(100);
4177 
4178 		/* Write the data to the new bank (offset in words). */
4179 		act_offset = i + new_bank_offset;
4180 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4181 dword);
4182 if (ret_val)
4183 break;
4184 }
4185
4186 /* Don't bother writing the segment valid bits if sector
4187 * programming failed.
4188 */
4189 if (ret_val) {
4190 DEBUGOUT("Flash commit failed.\n");
4191 goto release;
4192 }
4193
4194 	/* Finally validate the new segment by setting bits 15:14
4195 	 * to 10b in word 0x13. This can be done without an
4196 	 * erase since these bits start out as 11b and we only
4197 	 * need to change bit 14 to 0b.
4198 	 */
4199 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4200
4201 	/* Offset is in words, but we read a dword; back up one word. */
4202 	--act_offset;
4203 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4204
4205 if (ret_val)
4206 goto release;
4207
4208 dword &= 0xBFFFFFFF;
4209 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4210
4211 if (ret_val)
4212 goto release;
4213
4214 	/* And invalidate the previously valid segment by setting
4215 	 * its signature word (0x13) high byte to 0. This can be
4216 	 * done without an erase because flash erase sets all bits
4217 	 * to 1's, and 1's can be written to 0's without an erase.
4218 	 */
4219 
4220 	/* Offset is in words, but we read a dword; the signature
4221 	 * word's high byte sits in the top byte of the dword. */
4222 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4223 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4224
4225 if (ret_val)
4226 goto release;
4227
4228 dword &= 0x00FFFFFF;
4229 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4230
4231 if (ret_val)
4232 goto release;
4233
4234 /* Great! Everything worked, we can now clear the cached entries. */
4235 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4236 dev_spec->shadow_ram[i].modified = FALSE;
4237 dev_spec->shadow_ram[i].value = 0xFFFF;
4238 }
4239
4240 release:
4241 nvm->ops.release(hw);
4242
4243 /* Reload the EEPROM, or else modifications will not appear
4244 * until after the next adapter reset.
4245 */
4246 if (!ret_val) {
4247 nvm->ops.reload(hw);
4248 msec_delay(10);
4249 }
4250
4251 out:
4252 if (ret_val)
4253 DEBUGOUT1("NVM update error: %d\n", ret_val);
4254
4255 return ret_val;
4256 }
4257
4258 /**
4259 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4260 * @hw: pointer to the HW structure
4261 *
4262 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4263 * which writes the checksum to the shadow ram. The changes in the shadow
4264 * ram are then committed to the EEPROM by processing each bank at a time
4265 * checking for the modified bit and writing only the pending changes.
4266 * After a successful commit, the shadow ram is cleared and is ready for
4267 * future writes.
4268 **/
4269 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4270 {
4271 struct e1000_nvm_info *nvm = &hw->nvm;
4272 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4273 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4274 s32 ret_val;
4275 u16 data = 0;
4276
4277 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4278
4279 ret_val = e1000_update_nvm_checksum_generic(hw);
4280 if (ret_val)
4281 goto out;
4282
4283 if (nvm->type != e1000_nvm_flash_sw)
4284 goto out;
4285
4286 nvm->ops.acquire(hw);
4287
4288 /* We're writing to the opposite bank so if we're on bank 1,
4289 * write to bank 0 etc. We also need to erase the segment that
4290 * is going to be written
4291 */
4292 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4293 if (ret_val != E1000_SUCCESS) {
4294 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4295 bank = 0;
4296 }
4297
4298 if (bank == 0) {
4299 new_bank_offset = nvm->flash_bank_size;
4300 old_bank_offset = 0;
4301 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4302 if (ret_val)
4303 goto release;
4304 } else {
4305 old_bank_offset = nvm->flash_bank_size;
4306 new_bank_offset = 0;
4307 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4308 if (ret_val)
4309 goto release;
4310 }
4311 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4312 if (dev_spec->shadow_ram[i].modified) {
4313 data = dev_spec->shadow_ram[i].value;
4314 } else {
4315 ret_val = e1000_read_flash_word_ich8lan(hw, i +
4316 old_bank_offset,
4317 &data);
4318 if (ret_val)
4319 break;
4320 }
4321 /* If the word is 0x13, then make sure the signature bits
4322 * (15:14) are 11b until the commit has completed.
4323 * This will allow us to write 10b which indicates the
4324 * signature is valid. We want to do this after the write
4325 * has completed so that we don't mark the segment valid
4326 * while the write is still in progress
4327 */
4328 if (i == E1000_ICH_NVM_SIG_WORD)
4329 data |= E1000_ICH_NVM_SIG_MASK;
4330
4331 /* Convert offset to bytes. */
4332 act_offset = (i + new_bank_offset) << 1;
4333
4334 usec_delay(100);
4335
4336 /* Write the bytes to the new bank. */
4337 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4338 act_offset,
4339 (u8)data);
4340 if (ret_val)
4341 break;
4342
4343 usec_delay(100);
4344 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4345 act_offset + 1,
4346 (u8)(data >> 8));
4347 if (ret_val)
4348 break;
4349 }
4350
4351 /* Don't bother writing the segment valid bits if sector
4352 * programming failed.
4353 */
4354 if (ret_val) {
4355 DEBUGOUT("Flash commit failed.\n");
4356 goto release;
4357 }
4358
4359 	/* Finally validate the new segment by setting bits 15:14
4360 	 * to 10b in word 0x13. This can be done without an
4361 	 * erase since these bits start out as 11b and we only
4362 	 * need to change bit 14 to 0b.
4363 	 */
4364 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4365 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4366 if (ret_val)
4367 goto release;
4368
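	/* Only the high byte needs to change: 0xBFFF clears bit 14 of the
	 * signature word, and byte address act_offset * 2 + 1 addresses
	 * bits 15:8 of word 0x13, so writing data >> 8 there rewrites
	 * just the signature byte.
	 */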
4369 data &= 0xBFFF;
4370 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4371 (u8)(data >> 8));
4372 if (ret_val)
4373 goto release;
4374
4375 	/* And invalidate the previously valid segment by setting
4376 	 * its signature word (0x13) high byte to 0. This can be
4377 	 * done without an erase because flash erase sets all bits
4378 	 * to 1's, and 1's can be written to 0's without an erase.
4379 	 */
4380 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4381
4382 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4383
4384 if (ret_val)
4385 goto release;
4386
4387 /* Great! Everything worked, we can now clear the cached entries. */
4388 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4389 dev_spec->shadow_ram[i].modified = FALSE;
4390 dev_spec->shadow_ram[i].value = 0xFFFF;
4391 }
4392
4393 release:
4394 nvm->ops.release(hw);
4395
4396 /* Reload the EEPROM, or else modifications will not appear
4397 * until after the next adapter reset.
4398 */
4399 if (!ret_val) {
4400 nvm->ops.reload(hw);
4401 msec_delay(10);
4402 }
4403
4404 out:
4405 if (ret_val)
4406 DEBUGOUT1("NVM update error: %d\n", ret_val);
4407
4408 return ret_val;
4409 }
4410
4411 /**
4412 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4413 * @hw: pointer to the HW structure
4414 *
4415 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4416  * If the bit is 0, the EEPROM has been modified but the checksum was not
4417  * calculated, in which case we need to calculate the checksum and set bit 6.
4418 **/
4419 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4420 {
4421 s32 ret_val;
4422 u16 data;
4423 u16 word;
4424 u16 valid_csum_mask;
4425
4426 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4427
4428 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4429 * the checksum needs to be fixed. This bit is an indication that
4430 * the NVM was prepared by OEM software and did not calculate
4431 * the checksum...a likely scenario.
4432 */
4433 switch (hw->mac.type) {
4434 case e1000_pch_lpt:
4435 case e1000_pch_spt:
4436 case e1000_pch_cnp:
4437 case e1000_pch_tgp:
4438 case e1000_pch_adp:
4439 case e1000_pch_mtp:
4440 case e1000_pch_lnp:
4441 case e1000_pch_rpl:
4442 case e1000_pch_arl:
4443 case e1000_pch_ptp:
4444 case e1000_pch_nvl:
4445 word = NVM_COMPAT;
4446 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4447 break;
4448 default:
4449 word = NVM_FUTURE_INIT_WORD1;
4450 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4451 break;
4452 }
4453
4454 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4455 if (ret_val)
4456 return ret_val;
4457
4458 if (!(data & valid_csum_mask)) {
4459 data |= valid_csum_mask;
4460 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4461 if (ret_val)
4462 return ret_val;
4463 ret_val = hw->nvm.ops.update(hw);
4464 if (ret_val)
4465 return ret_val;
4466 }
4467
4468 return e1000_validate_nvm_checksum_generic(hw);
4469 }
4470
4471 /**
4472 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4473 * @hw: pointer to the HW structure
4474  * @offset: The offset (in bytes) of the byte/word to write.
4475  * @size: Size of data to write, 1=byte 2=word (4=dword on SPT)
4476 * @data: The byte(s) to write to the NVM.
4477 *
4478 * Writes one/two bytes to the NVM using the flash access registers.
4479 **/
4480 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4481 u8 size, u16 data)
4482 {
4483 union ich8_hws_flash_status hsfsts;
4484 union ich8_hws_flash_ctrl hsflctl;
4485 u32 flash_linear_addr;
4486 u32 flash_data = 0;
4487 s32 ret_val;
4488 u8 count = 0;
4489
4490 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
4491
4492 if (hw->mac.type >= e1000_pch_spt) {
4493 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4494 return -E1000_ERR_NVM;
4495 } else {
4496 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4497 return -E1000_ERR_NVM;
4498 }
4499
4500 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4501 hw->nvm.flash_base_addr);
4502
4503 do {
4504 usec_delay(1);
4505 /* Steps */
4506 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4507 if (ret_val != E1000_SUCCESS)
4508 break;
4509 		/* In SPT, this register is in LAN memory space, not
4510 		 * flash. Therefore, only 32-bit access is supported.
4511 		 */
4512 if (hw->mac.type >= e1000_pch_spt)
4513 hsflctl.regval =
4514 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4515 else
4516 hsflctl.regval =
4517 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4518
4519 		/* A fldbcount of size - 1 selects a transfer of 1, 2 or 4 bytes. */
4520 hsflctl.hsf_ctrl.fldbcount = size - 1;
4521 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4522 		/* In SPT, this register is in LAN memory space,
4523 		 * not flash. Therefore, only 32-bit access is
4524 		 * supported.
4525 		 */
4526 if (hw->mac.type >= e1000_pch_spt)
4527 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4528 hsflctl.regval << 16);
4529 else
4530 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4531 hsflctl.regval);
4532
4533 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4534
4535 if (size == 1)
4536 flash_data = (u32)data & 0x00FF;
4537 else
4538 flash_data = (u32)data;
4539
4540 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4541
4542 		/* Check if FCERR is set to 1; if so, clear it and try
4543 		 * the whole sequence a few more times, else we are done.
4544 		 */
4545 ret_val =
4546 e1000_flash_cycle_ich8lan(hw,
4547 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4548 if (ret_val == E1000_SUCCESS)
4549 break;
4550
4551 /* If we're here, then things are most likely
4552 * completely hosed, but if the error condition
4553 * is detected, it won't hurt to give it another
4554 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4555 */
4556 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4557 if (hsfsts.hsf_status.flcerr)
4558 /* Repeat for some time before giving up. */
4559 continue;
4560 if (!hsfsts.hsf_status.flcdone) {
4561 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4562 break;
4563 }
4564 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4565
4566 return ret_val;
4567 }
4568
4569 /**
4570 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4571 * @hw: pointer to the HW structure
4572  * @offset: The offset (in bytes) of the dword to write.
4573  * @data: The 4 bytes to write to the NVM.
4574  *
4575  * Writes a dword to the NVM using the flash access registers.
4576 **/
4577 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4578 u32 data)
4579 {
4580 union ich8_hws_flash_status hsfsts;
4581 union ich8_hws_flash_ctrl hsflctl;
4582 u32 flash_linear_addr;
4583 s32 ret_val;
4584 u8 count = 0;
4585
4586 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4587
4588 if (hw->mac.type >= e1000_pch_spt) {
4589 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4590 return -E1000_ERR_NVM;
4591 }
4592 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4593 hw->nvm.flash_base_addr);
4594 do {
4595 usec_delay(1);
4596 /* Steps */
4597 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4598 if (ret_val != E1000_SUCCESS)
4599 break;
4600
4601 		/* In SPT, this register is in LAN memory space, not
4602 		 * flash. Therefore, only 32-bit access is supported.
4603 		 */
4604 if (hw->mac.type >= e1000_pch_spt)
4605 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4606 ICH_FLASH_HSFSTS)
4607 >> 16;
4608 else
4609 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4610 ICH_FLASH_HSFCTL);
4611
4612 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4613 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4614
4615 		/* In SPT, this register is in LAN memory space,
4616 		 * not flash. Therefore, only 32-bit access is
4617 		 * supported.
4618 		 */
4619 if (hw->mac.type >= e1000_pch_spt)
4620 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4621 hsflctl.regval << 16);
4622 else
4623 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4624 hsflctl.regval);
4625
4626 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4627
4628 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4629
4630 		/* Check if FCERR is set to 1; if so, clear it and try
4631 		 * the whole sequence a few more times, else we are done.
4632 		 */
4633 ret_val = e1000_flash_cycle_ich8lan(hw,
4634 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4635
4636 if (ret_val == E1000_SUCCESS)
4637 break;
4638
4639 /* If we're here, then things are most likely
4640 * completely hosed, but if the error condition
4641 * is detected, it won't hurt to give it another
4642 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4643 */
4644 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4645
4646 if (hsfsts.hsf_status.flcerr)
4647 /* Repeat for some time before giving up. */
4648 continue;
4649 if (!hsfsts.hsf_status.flcdone) {
4650 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4651 break;
4652 }
4653 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4654
4655 return ret_val;
4656 }
4657
4658 /**
4659 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4660 * @hw: pointer to the HW structure
4661  * @offset: The index of the byte to write.
4662 * @data: The byte to write to the NVM.
4663 *
4664 * Writes a single byte to the NVM using the flash access registers.
4665 **/
4666 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4667 u8 data)
4668 {
4669 u16 word = (u16)data;
4670
4671 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4672
4673 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4674 }
4675
4676 /**
4677 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4678 * @hw: pointer to the HW structure
4679  * @offset: The offset (in words) of the dword to write.
4680 * @dword: The dword to write to the NVM.
4681 *
4682 * Writes a single dword to the NVM using the flash access registers.
4683 * Goes through a retry algorithm before giving up.
4684 **/
4685 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4686 u32 offset, u32 dword)
4687 {
4688 s32 ret_val;
4689 u16 program_retries;
4690
4691 DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4692
4693 /* Must convert word offset into bytes. */
4694 offset <<= 1;
4695
4696 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4697
4698 if (!ret_val)
4699 return ret_val;
4700 for (program_retries = 0; program_retries < 100; program_retries++) {
4701 		DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4702 usec_delay(100);
4703 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4704 if (ret_val == E1000_SUCCESS)
4705 break;
4706 }
4707 if (program_retries == 100)
4708 return -E1000_ERR_NVM;
4709
4710 return E1000_SUCCESS;
4711 }
4712
4713 /**
4714 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4715 * @hw: pointer to the HW structure
4716 * @offset: The offset of the byte to write.
4717 * @byte: The byte to write to the NVM.
4718 *
4719 * Writes a single byte to the NVM using the flash access registers.
4720 * Goes through a retry algorithm before giving up.
4721 **/
4722 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4723 u32 offset, u8 byte)
4724 {
4725 s32 ret_val;
4726 u16 program_retries;
4727
4728 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4729
4730 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4731 if (!ret_val)
4732 return ret_val;
4733
4734 for (program_retries = 0; program_retries < 100; program_retries++) {
4735 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4736 usec_delay(100);
4737 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4738 if (ret_val == E1000_SUCCESS)
4739 break;
4740 }
4741 if (program_retries == 100)
4742 return -E1000_ERR_NVM;
4743
4744 return E1000_SUCCESS;
4745 }
4746
4747 /**
4748 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4749 * @hw: pointer to the HW structure
4750 * @bank: 0 for first bank, 1 for second bank, etc.
4751 *
4752  * Erases the specified bank. Each bank is a 4k block and banks are
4753  * zero-based: bank N starts at 4096 * N + flash_reg_addr.
4754 **/
4755 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4756 {
4757 struct e1000_nvm_info *nvm = &hw->nvm;
4758 union ich8_hws_flash_status hsfsts;
4759 union ich8_hws_flash_ctrl hsflctl;
4760 u32 flash_linear_addr;
4761 	/* bank size is in 16-bit words - adjust to bytes */
4762 u32 flash_bank_size = nvm->flash_bank_size * 2;
4763 s32 ret_val;
4764 s32 count = 0;
4765 s32 j, iteration, sector_size;
4766
4767 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4768
4769 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4770
4771 /* Determine HW Sector size: Read BERASE bits of hw flash status
4772 * register
4773 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4774 * consecutive sectors. The start index for the nth Hw sector
4775 * can be calculated as = bank * 4096 + n * 256
4776 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4777 * The start index for the nth Hw sector can be calculated
4778 * as = bank * 4096
4779 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4780 * (ich9 only, otherwise error condition)
4781 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4782 */
4783 switch (hsfsts.hsf_status.berasesz) {
4784 case 0:
4785 /* Hw sector size 256 */
4786 sector_size = ICH_FLASH_SEG_SIZE_256;
4787 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4788 break;
4789 case 1:
4790 sector_size = ICH_FLASH_SEG_SIZE_4K;
4791 iteration = 1;
4792 break;
4793 case 2:
4794 sector_size = ICH_FLASH_SEG_SIZE_8K;
4795 iteration = 1;
4796 break;
4797 case 3:
4798 sector_size = ICH_FLASH_SEG_SIZE_64K;
4799 iteration = 1;
4800 break;
4801 default:
4802 return -E1000_ERR_NVM;
4803 }
4804
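	/* For example, per the table above: a bank built from 256-byte
	 * hardware sectors needs flash_bank_size / 256 erase iterations
	 * (16 for a 4 KiB bank), while a 4 KiB or larger hardware sector
	 * erases the whole bank in a single iteration.
	 */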
4805 /* Start with the base address, then add the sector offset. */
4806 flash_linear_addr = hw->nvm.flash_base_addr;
4807 flash_linear_addr += (bank) ? flash_bank_size : 0;
4808
4809 for (j = 0; j < iteration; j++) {
4810 do {
4811 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4812
4813 /* Steps */
4814 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4815 if (ret_val)
4816 return ret_val;
4817
4818 /* Write a value 11 (block Erase) in Flash
4819 * Cycle field in hw flash control
4820 */
4821 if (hw->mac.type >= e1000_pch_spt)
4822 hsflctl.regval =
4823 E1000_READ_FLASH_REG(hw,
4824 ICH_FLASH_HSFSTS)>>16;
4825 else
4826 hsflctl.regval =
4827 E1000_READ_FLASH_REG16(hw,
4828 ICH_FLASH_HSFCTL);
4829
4830 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4831 if (hw->mac.type >= e1000_pch_spt)
4832 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4833 hsflctl.regval << 16);
4834 else
4835 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4836 hsflctl.regval);
4837
4838 			/* Write the last 24 bits of the sector index within
4839 			 * the bank into the Flash Linear Address field in
4840 			 * Flash Address, computed from the bank base each
4841 			 * pass so retries do not accumulate the offset.
4842 			 */
4843 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4844 					       flash_linear_addr + (j * sector_size));
4845
4846 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4847 if (ret_val == E1000_SUCCESS)
4848 break;
4849
4850 			/* Check if FCERR is set to 1. If so,
4851 			 * clear it and try the whole sequence
4852 			 * a few more times, else we are done.
4853 			 */
4854 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4855 ICH_FLASH_HSFSTS);
4856 if (hsfsts.hsf_status.flcerr)
4857 /* repeat for some time before giving up */
4858 continue;
4859 else if (!hsfsts.hsf_status.flcdone)
4860 return ret_val;
4861 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4862 }
4863
4864 return E1000_SUCCESS;
4865 }
4866
4867 /**
4868 * e1000_valid_led_default_ich8lan - Set the default LED settings
4869 * @hw: pointer to the HW structure
4870 * @data: Pointer to the LED settings
4871 *
4872  * Reads the LED default settings from the NVM into data. If the NVM LED
4873  * setting is all 0's or F's, set the LED default to a valid LED default
4874  * setting.
4875 **/
4876 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4877 {
4878 s32 ret_val;
4879
4880 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4881
4882 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4883 if (ret_val) {
4884 DEBUGOUT("NVM Read Error\n");
4885 return ret_val;
4886 }
4887
4888 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4889 *data = ID_LED_DEFAULT_ICH8LAN;
4890
4891 return E1000_SUCCESS;
4892 }
4893
4894 /**
4895 * e1000_id_led_init_pchlan - store LED configurations
4896 * @hw: pointer to the HW structure
4897 *
4898 * PCH does not control LEDs via the LEDCTL register, rather it uses
4899 * the PHY LED configuration register.
4900 *
4901 * PCH also does not have an "always on" or "always off" mode which
4902 * complicates the ID feature. Instead of using the "on" mode to indicate
4903 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4904 * use "link_up" mode. The LEDs will still ID on request if there is no
4905 * link based on logic in e1000_led_[on|off]_pchlan().
4906 **/
4907 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4908 {
4909 struct e1000_mac_info *mac = &hw->mac;
4910 s32 ret_val;
4911 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4912 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4913 u16 data, i, temp, shift;
4914
4915 DEBUGFUNC("e1000_id_led_init_pchlan");
4916
4917 /* Get default ID LED modes */
4918 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4919 if (ret_val)
4920 return ret_val;
4921
4922 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4923 mac->ledctl_mode1 = mac->ledctl_default;
4924 mac->ledctl_mode2 = mac->ledctl_default;
4925
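	/* Each LED has a 4-bit mode field in the NVM word, extracted with
	 * (data >> (i << 2)), while the PHY LED configuration register
	 * uses 5-bit per-LED fields, hence the separate shift of i * 5.
	 */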
4926 for (i = 0; i < 4; i++) {
4927 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4928 shift = (i * 5);
4929 switch (temp) {
4930 case ID_LED_ON1_DEF2:
4931 case ID_LED_ON1_ON2:
4932 case ID_LED_ON1_OFF2:
4933 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4934 mac->ledctl_mode1 |= (ledctl_on << shift);
4935 break;
4936 case ID_LED_OFF1_DEF2:
4937 case ID_LED_OFF1_ON2:
4938 case ID_LED_OFF1_OFF2:
4939 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4940 mac->ledctl_mode1 |= (ledctl_off << shift);
4941 break;
4942 default:
4943 /* Do nothing */
4944 break;
4945 }
4946 switch (temp) {
4947 case ID_LED_DEF1_ON2:
4948 case ID_LED_ON1_ON2:
4949 case ID_LED_OFF1_ON2:
4950 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4951 mac->ledctl_mode2 |= (ledctl_on << shift);
4952 break;
4953 case ID_LED_DEF1_OFF2:
4954 case ID_LED_ON1_OFF2:
4955 case ID_LED_OFF1_OFF2:
4956 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4957 mac->ledctl_mode2 |= (ledctl_off << shift);
4958 break;
4959 default:
4960 /* Do nothing */
4961 break;
4962 }
4963 }
4964
4965 return E1000_SUCCESS;
4966 }
4967
4968 /**
4969 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4970 * @hw: pointer to the HW structure
4971 *
4972  * ICH8 uses the PCI Express bus, but does not contain a PCI Express
4973  * Capability register, so the bus width is hard-coded.
4974 **/
4975 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4976 {
4977 struct e1000_bus_info *bus = &hw->bus;
4978 s32 ret_val;
4979
4980 DEBUGFUNC("e1000_get_bus_info_ich8lan");
4981
4982 ret_val = e1000_get_bus_info_pcie_generic(hw);
4983
4984 /* ICH devices are "PCI Express"-ish. They have
4985 * a configuration space, but do not contain
4986 * PCI Express Capability registers, so bus width
4987 * must be hardcoded.
4988 */
4989 if (bus->width == e1000_bus_width_unknown)
4990 bus->width = e1000_bus_width_pcie_x1;
4991
4992 return ret_val;
4993 }
4994
4995 /**
4996 * e1000_reset_hw_ich8lan - Reset the hardware
4997 * @hw: pointer to the HW structure
4998 *
4999 * Does a full reset of the hardware which includes a reset of the PHY and
5000 * MAC.
5001 **/
5002 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
5003 {
5004 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5005 u16 kum_cfg;
5006 u32 ctrl, reg;
5007 s32 ret_val;
5008
5009 DEBUGFUNC("e1000_reset_hw_ich8lan");
5010
5011 /* Prevent the PCI-E bus from sticking if there is no TLP connection
5012 * on the last TLP read/write transaction when MAC is reset.
5013 */
5014 ret_val = e1000_disable_pcie_master_generic(hw);
5015 if (ret_val)
5016 DEBUGOUT("PCI-E Master disable polling has failed.\n");
5017
5018 DEBUGOUT("Masking off all interrupts\n");
5019 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5020
5021 /* Disable the Transmit and Receive units. Then delay to allow
5022 * any pending transactions to complete before we hit the MAC
5023 * with the global reset.
5024 */
5025 E1000_WRITE_REG(hw, E1000_RCTL, 0);
5026 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
5027 E1000_WRITE_FLUSH(hw);
5028
5029 msec_delay(10);
5030
5031 /* Workaround for ICH8 bit corruption issue in FIFO memory */
5032 if (hw->mac.type == e1000_ich8lan) {
5033 /* Set Tx and Rx buffer allocation to 8k apiece. */
5034 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
5035 /* Set Packet Buffer Size to 16k. */
5036 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
5037 }
5038
5039 if (hw->mac.type == e1000_pchlan) {
5040 		/* Save the NVM K1 bit setting */
5041 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
5042 if (ret_val)
5043 return ret_val;
5044
5045 if (kum_cfg & E1000_NVM_K1_ENABLE)
5046 dev_spec->nvm_k1_enabled = TRUE;
5047 else
5048 dev_spec->nvm_k1_enabled = FALSE;
5049 }
5050
5051 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5052
5053 if (!hw->phy.ops.check_reset_block(hw)) {
5054 /* Full-chip reset requires MAC and PHY reset at the same
5055 * time to make sure the interface between MAC and the
5056 * external PHY is reset.
5057 */
5058 ctrl |= E1000_CTRL_PHY_RST;
5059
5060 /* Gate automatic PHY configuration by hardware on
5061 * non-managed 82579
5062 */
5063 if ((hw->mac.type == e1000_pch2lan) &&
5064 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5065 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
5066 }
5067 ret_val = e1000_acquire_swflag_ich8lan(hw);
5068 DEBUGOUT("Issuing a global reset to ich8lan\n");
5069 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5070 /* cannot issue a flush here because it hangs the hardware */
5071 msec_delay(20);
5072
5073 /* Set Phy Config Counter to 50msec */
5074 if (hw->mac.type == e1000_pch2lan) {
5075 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5076 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5077 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5078 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5079 }
5080
5081 if (!ret_val)
5082 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
5083
5084 if (ctrl & E1000_CTRL_PHY_RST) {
5085 ret_val = hw->phy.ops.get_cfg_done(hw);
5086 if (ret_val)
5087 return ret_val;
5088
5089 ret_val = e1000_post_phy_reset_ich8lan(hw);
5090 if (ret_val)
5091 return ret_val;
5092 }
5093
5094 /* For PCH, this write will make sure that any noise
5095 * will be detected as a CRC error and be dropped rather than show up
5096 * as a bad packet to the DMA engine.
5097 */
5098 if (hw->mac.type == e1000_pchlan)
5099 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5100
5101 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5102 E1000_READ_REG(hw, E1000_ICR);
5103
5104 reg = E1000_READ_REG(hw, E1000_KABGTXD);
5105 reg |= E1000_KABGTXD_BGSQLBIAS;
5106 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5107
5108 return E1000_SUCCESS;
5109 }
5110
5111 /**
5112 * e1000_init_hw_ich8lan - Initialize the hardware
5113 * @hw: pointer to the HW structure
5114 *
5115 * Prepares the hardware for transmit and receive by doing the following:
5116 * - initialize hardware bits
5117 * - initialize LED identification
5118 * - setup receive address registers
5119 * - setup flow control
5120 * - setup transmit descriptors
5121 * - clear statistics
5122 **/
5123 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5124 {
5125 struct e1000_mac_info *mac = &hw->mac;
5126 u32 ctrl_ext, txdctl, snoop;
5127 s32 ret_val;
5128 u16 i;
5129
5130 DEBUGFUNC("e1000_init_hw_ich8lan");
5131
5132 e1000_initialize_hw_bits_ich8lan(hw);
5133
5134 /* Initialize identification LED */
5135 ret_val = mac->ops.id_led_init(hw);
5136 /* An error is not fatal and we should not stop init due to this */
5137 if (ret_val)
5138 DEBUGOUT("Error initializing identification LED\n");
5139
5140 /* Setup the receive address. */
5141 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5142
5143 /* Zero out the Multicast HASH table */
5144 DEBUGOUT("Zeroing the MTA\n");
5145 for (i = 0; i < mac->mta_reg_count; i++)
5146 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5147
5148 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
5149 * the ME. Disable wakeup by clearing the host wakeup bit.
5150 * Reset the phy after disabling host wakeup to reset the Rx buffer.
5151 */
5152 if (hw->phy.type == e1000_phy_82578) {
5153 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5154 i &= ~BM_WUC_HOST_WU_BIT;
5155 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5156 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5157 if (ret_val)
5158 return ret_val;
5159 }
5160
5161 /* Setup link and flow control */
5162 ret_val = mac->ops.setup_link(hw);
5163
5164 /* Set the transmit descriptor write-back policy for both queues */
5165 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5166 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5167 E1000_TXDCTL_FULL_TX_DESC_WB);
5168 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5169 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5170 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5171 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5172 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5173 E1000_TXDCTL_FULL_TX_DESC_WB);
5174 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5175 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5176 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5177
5178 /* ICH8 has opposite polarity of no_snoop bits.
5179 * By default, we should use snoop behavior.
5180 */
5181 if (mac->type == e1000_ich8lan)
5182 snoop = PCIE_ICH8_SNOOP_ALL;
5183 else
5184 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5185 e1000_set_pcie_no_snoop_generic(hw, snoop);
5186
5187 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5188 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5189 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5190
5191 /* Clear all of the statistics registers (clear on read). It is
5192 * important that we do this after we have tried to establish link
5193 * because the symbol error count will increment wildly if there
5194 * is no link.
5195 */
5196 e1000_clear_hw_cntrs_ich8lan(hw);
5197
5198 return ret_val;
5199 }
5200
5201 /**
5202 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5203 * @hw: pointer to the HW structure
5204 *
5205 * Sets/Clears required hardware bits necessary for correctly setting up the
5206 * hardware for transmit and receive.
5207 **/
5208 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5209 {
5210 u32 reg;
5211
5212 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5213
5214 /* Extended Device Control */
5215 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5216 reg |= (1 << 22);
5217 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5218 if (hw->mac.type >= e1000_pchlan)
5219 reg |= E1000_CTRL_EXT_PHYPDEN;
5220 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5221
5222 /* Transmit Descriptor Control 0 */
5223 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5224 reg |= (1 << 22);
5225 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5226
5227 /* Transmit Descriptor Control 1 */
5228 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5229 reg |= (1 << 22);
5230 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5231
5232 /* Transmit Arbitration Control 0 */
5233 reg = E1000_READ_REG(hw, E1000_TARC(0));
5234 if (hw->mac.type == e1000_ich8lan)
5235 reg |= (1 << 28) | (1 << 29);
5236 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5237 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5238
5239 /* Transmit Arbitration Control 1 */
5240 reg = E1000_READ_REG(hw, E1000_TARC(1));
5241 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5242 reg &= ~(1 << 28);
5243 else
5244 reg |= (1 << 28);
5245 reg |= (1 << 24) | (1 << 26) | (1 << 30);
5246 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5247
5248 /* Device Status */
5249 if (hw->mac.type == e1000_ich8lan) {
5250 reg = E1000_READ_REG(hw, E1000_STATUS);
5251 reg &= ~(1UL << 31);
5252 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5253 }
5254
5255 	/* Work around a descriptor data corruption issue seen with NFSv2
5256 	 * UDP traffic by disabling the NFS filtering capability.
5257 */
5258 reg = E1000_READ_REG(hw, E1000_RFCTL);
5259 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5260
5261 /* Disable IPv6 extension header parsing because some malformed
5262 * IPv6 headers can hang the Rx.
5263 */
5264 if (hw->mac.type == e1000_ich8lan)
5265 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5266 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5267
5268 /* Enable ECC on Lynxpoint */
5269 if (hw->mac.type >= e1000_pch_lpt) {
5270 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5271 reg |= E1000_PBECCSTS_ECC_ENABLE;
5272 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5273
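		/* MEHE (Memory Error Handling Enable) lets the MAC act on
		 * the packet-buffer ECC errors enabled above.
		 */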
5274 reg = E1000_READ_REG(hw, E1000_CTRL);
5275 reg |= E1000_CTRL_MEHE;
5276 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5277 }
5278
5279 return;
5280 }
5281
5282 /**
5283 * e1000_setup_link_ich8lan - Setup flow control and link settings
5284 * @hw: pointer to the HW structure
5285 *
5286 * Determines which flow control settings to use, then configures flow
5287 * control. Calls the appropriate media-specific link configuration
5288 * function. Assuming the adapter has a valid link partner, a valid link
5289 * should be established. Assumes the hardware has previously been reset
5290 * and the transmitter and receiver are not enabled.
5291 **/
5292 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5293 {
5294 s32 ret_val;
5295
5296 DEBUGFUNC("e1000_setup_link_ich8lan");
5297
5298 if (hw->phy.ops.check_reset_block(hw))
5299 return E1000_SUCCESS;
5300
5301 /* ICH parts do not have a word in the NVM to determine
5302 * the default flow control setting, so we explicitly
5303 * set it to full.
5304 */
5305 if (hw->fc.requested_mode == e1000_fc_default)
5306 hw->fc.requested_mode = e1000_fc_full;
5307
5308 /* Save off the requested flow control mode for use later. Depending
5309 * on the link partner's capabilities, we may or may not use this mode.
5310 */
5311 hw->fc.current_mode = hw->fc.requested_mode;
5312
5313 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5314 hw->fc.current_mode);
5315
5316 /* Continue to configure the copper link. */
5317 ret_val = hw->mac.ops.setup_physical_interface(hw);
5318 if (ret_val)
5319 return ret_val;
5320
5321 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5322 if ((hw->phy.type == e1000_phy_82578) ||
5323 (hw->phy.type == e1000_phy_82579) ||
5324 (hw->phy.type == e1000_phy_i217) ||
5325 (hw->phy.type == e1000_phy_82577)) {
5326 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5327
5328 ret_val = hw->phy.ops.write_reg(hw,
5329 PHY_REG(BM_PORT_CTRL_PAGE, 27),
5330 hw->fc.pause_time);
5331 if (ret_val)
5332 return ret_val;
5333 }
5334
5335 return e1000_set_fc_watermarks_generic(hw);
5336 }
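/* Usage sketch (hypothetical caller, not part of this file): a driver may
 * request a specific flow control mode before link setup; e1000_fc_default
 * is promoted to e1000_fc_full by the function above.
 *
 *	hw->fc.requested_mode = e1000_fc_rx_pause;
 *	ret_val = hw->mac.ops.setup_link(hw);
 */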
5337
5338 /**
5339 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5340 * @hw: pointer to the HW structure
5341 *
5342  * Configures the Kumeran interface to the PHY to wait the appropriate time
5343  * when polling the PHY, then calls the generic setup_copper_link to finish
5344  * configuring the copper link.
5345 **/
5346 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5347 {
5348 u32 ctrl;
5349 s32 ret_val;
5350 u16 reg_data;
5351
5352 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5353
5354 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5355 ctrl |= E1000_CTRL_SLU;
5356 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5357 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5358
5359 /* Set the mac to wait the maximum time between each iteration
5360 * and increase the max iterations when polling the phy;
5361 * this fixes erroneous timeouts at 10Mbps.
5362 */
5363 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5364 0xFFFF);
5365 if (ret_val)
5366 return ret_val;
5367 ret_val = e1000_read_kmrn_reg_generic(hw,
5368 E1000_KMRNCTRLSTA_INBAND_PARAM,
5369 &reg_data);
5370 if (ret_val)
5371 return ret_val;
5372 reg_data |= 0x3F;
5373 ret_val = e1000_write_kmrn_reg_generic(hw,
5374 E1000_KMRNCTRLSTA_INBAND_PARAM,
5375 reg_data);
5376 if (ret_val)
5377 return ret_val;
5378
5379 switch (hw->phy.type) {
5380 case e1000_phy_igp_3:
5381 ret_val = e1000_copper_link_setup_igp(hw);
5382 if (ret_val)
5383 return ret_val;
5384 break;
5385 case e1000_phy_bm:
5386 case e1000_phy_82578:
5387 ret_val = e1000_copper_link_setup_m88(hw);
5388 if (ret_val)
5389 return ret_val;
5390 break;
5391 case e1000_phy_82577:
5392 case e1000_phy_82579:
5393 ret_val = e1000_copper_link_setup_82577(hw);
5394 if (ret_val)
5395 return ret_val;
5396 break;
5397 case e1000_phy_ife:
5398 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5399 &reg_data);
5400 if (ret_val)
5401 return ret_val;
5402
5403 reg_data &= ~IFE_PMC_AUTO_MDIX;
5404
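		/* hw->phy.mdix selects the crossover mode: 1 forces MDI,
		 * 2 forces MDI-X, and 0 (or any other value) selects
		 * auto-crossover.
		 */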
5405 switch (hw->phy.mdix) {
5406 case 1:
5407 reg_data &= ~IFE_PMC_FORCE_MDIX;
5408 break;
5409 case 2:
5410 reg_data |= IFE_PMC_FORCE_MDIX;
5411 break;
5412 case 0:
5413 default:
5414 reg_data |= IFE_PMC_AUTO_MDIX;
5415 break;
5416 }
5417 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5418 reg_data);
5419 if (ret_val)
5420 return ret_val;
5421 break;
5422 default:
5423 break;
5424 }
5425
5426 return e1000_setup_copper_link_generic(hw);
5427 }
5428
5429 /**
5430 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5431 * @hw: pointer to the HW structure
5432 *
5433 * Calls the PHY specific link setup function and then calls the
5434 * generic setup_copper_link to finish configuring the link for
5435  * Lynxpoint PCH devices.
5436 **/
5437 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5438 {
5439 u32 ctrl;
5440 s32 ret_val;
5441
5442 DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5443
5444 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5445 ctrl |= E1000_CTRL_SLU;
5446 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5447 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5448
5449 ret_val = e1000_copper_link_setup_82577(hw);
5450 if (ret_val)
5451 return ret_val;
5452
5453 return e1000_setup_copper_link_generic(hw);
5454 }
5455
5456 /**
5457 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5458 * @hw: pointer to the HW structure
5459 * @speed: pointer to store current link speed
5460 * @duplex: pointer to store the current link duplex
5461 *
5462 * Calls the generic get_speed_and_duplex to retrieve the current link
5463 * information and then calls the Kumeran lock loss workaround for links at
5464 * gigabit speeds.
5465 **/
5466 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5467 u16 *duplex)
5468 {
5469 s32 ret_val;
5470
5471 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5472
5473 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5474 if (ret_val)
5475 return ret_val;
5476
5477 if ((hw->mac.type == e1000_ich8lan) &&
5478 (hw->phy.type == e1000_phy_igp_3) &&
5479 (*speed == SPEED_1000)) {
5480 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5481 }
5482
5483 return ret_val;
5484 }
5485
5486 /**
5487 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5488 * @hw: pointer to the HW structure
5489 *
5490 * Work-around for 82566 Kumeran PCS lock loss:
5491  * On a link status change (e.g. PCI reset, speed change) where link is up
5492  * at gigabit speed:
5493 * 0) if workaround is optionally disabled do nothing
5494 * 1) wait 1ms for Kumeran link to come up
5495 * 2) check Kumeran Diagnostic register PCS lock loss bit
5496 * 3) if not set the link is locked (all is good), otherwise...
5497 * 4) reset the PHY
5498 * 5) repeat up to 10 times
5499  * Note: this is only called for IGP3 copper when speed is 1Gb/s.
5500 **/
5501 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5502 {
5503 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5504 u32 phy_ctrl;
5505 s32 ret_val;
5506 u16 i, data;
5507 bool link;
5508
5509 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5510
5511 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5512 return E1000_SUCCESS;
5513
5514 	/* Make sure link is up before proceeding; if not, just return.
5515 	 * Attempting this while the link is negotiating has been seen to
5516 	 * foul up link stability.
5517 	 */
5518 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5519 if (!link)
5520 return E1000_SUCCESS;
5521
5522 for (i = 0; i < 10; i++) {
5523 /* read once to clear */
5524 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5525 if (ret_val)
5526 return ret_val;
5527 /* and again to get new status */
5528 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5529 if (ret_val)
5530 return ret_val;
5531
5532 /* check for PCS lock */
5533 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5534 return E1000_SUCCESS;
5535
5536 /* Issue PHY reset */
5537 hw->phy.ops.reset(hw);
5538 msec_delay_irq(5);
5539 }
5540 /* Disable GigE link negotiation */
5541 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5542 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5543 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5544 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5545
5546 /* Call gig speed drop workaround on Gig disable before accessing
5547 * any PHY registers
5548 */
5549 e1000_gig_downshift_workaround_ich8lan(hw);
5550
5551 /* unable to acquire PCS lock */
5552 return -E1000_ERR_PHY;
5553 }
5554
5555 /**
5556 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5557 * @hw: pointer to the HW structure
5558 * @state: boolean value used to set the current Kumeran workaround state
5559 *
5560  * If the MAC is ICH8, set the current Kumeran lock loss workaround state
5561  * (TRUE = enabled, FALSE = disabled).
5562 **/
5563 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5564 bool state)
5565 {
5566 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5567
5568 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5569
5570 if (hw->mac.type != e1000_ich8lan) {
5571 DEBUGOUT("Workaround applies to ICH8 only.\n");
5572 return;
5573 }
5574
5575 dev_spec->kmrn_lock_loss_workaround_enabled = state;
5576
5577 return;
5578 }
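/* Usage sketch (hypothetical caller, not part of this file): the workaround
 * can be toggled around an operation known to disturb the Kumeran link:
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, FALSE);
 *	... run diagnostic ...
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
 */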
5579
5580 /**
5581  * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5582 * @hw: pointer to the HW structure
5583 *
5584 * Workaround for 82566 power-down on D3 entry:
5585 * 1) disable gigabit link
5586 * 2) write VR power-down enable
5587 * 3) read it back
5588 * Continue if successful, else issue LCD reset and repeat
5589 **/
5590 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5591 {
5592 u32 reg;
5593 u16 data;
5594 u8 retry = 0;
5595
5596 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5597
5598 if (hw->phy.type != e1000_phy_igp_3)
5599 return;
5600
5601 /* Try the workaround twice (if needed) */
5602 do {
5603 /* Disable link */
5604 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5605 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5606 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5607 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5608
5609 /* Call gig speed drop workaround on Gig disable before
5610 * accessing any PHY registers
5611 */
5612 if (hw->mac.type == e1000_ich8lan)
5613 e1000_gig_downshift_workaround_ich8lan(hw);
5614
5615 /* Write VR power-down enable */
5616 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5617 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5618 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5619 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5620
5621 /* Read it back and test */
5622 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5623 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5624 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5625 break;
5626
5627 /* Issue PHY reset and repeat at most one more time */
5628 reg = E1000_READ_REG(hw, E1000_CTRL);
5629 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5630 retry++;
5631 } while (retry);
5632 }
5633
5634 /**
5635 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5636 * @hw: pointer to the HW structure
5637 *
5638  * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5639 * LPLU, Gig disable, MDIC PHY reset):
5640 * 1) Set Kumeran Near-end loopback
5641 * 2) Clear Kumeran Near-end loopback
5642 * Should only be called for ICH8[m] devices with any 1G Phy.
5643 **/
5644 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5645 {
5646 s32 ret_val;
5647 u16 reg_data;
5648
5649 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5650
5651 if ((hw->mac.type != e1000_ich8lan) ||
5652 (hw->phy.type == e1000_phy_ife))
5653 return;
5654
5655 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5656 &reg_data);
5657 if (ret_val)
5658 return;
5659 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5660 ret_val = e1000_write_kmrn_reg_generic(hw,
5661 E1000_KMRNCTRLSTA_DIAG_OFFSET,
5662 reg_data);
5663 if (ret_val)
5664 return;
5665 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5666 e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5667 reg_data);
5668 }
5669
5670 /**
5671 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5672 * @hw: pointer to the HW structure
5673 *
5674 * During S0 to Sx transition, it is possible the link remains at gig
5675 * instead of negotiating to a lower speed. Before going to Sx, set
5676 * 'Gig Disable' to force link speed negotiation to a lower speed based on
5677 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
5678 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5679 * needs to be written.
5680  * Parts that support (and are linked to a partner which supports) EEE in
5681 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5682 * than 10Mbps w/o EEE.
5683 **/
5684 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5685 {
5686 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5687 u32 phy_ctrl;
5688 s32 ret_val;
5689
5690 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5691
5692 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5693 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5694
5695 if (hw->phy.type == e1000_phy_i217) {
5696 u16 phy_reg, device_id = hw->device_id;
5697
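		/* On I218 and SPT+ parts, release the PLL clock request
		 * (presumably set earlier as part of the K1/PLL workaround)
		 * so the clock can be gated while in Sx.
		 */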
5698 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5699 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5700 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5701 (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5702 (hw->mac.type >= e1000_pch_spt)) {
5703 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5704
5705 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5706 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5707 }
5708
5709 ret_val = hw->phy.ops.acquire(hw);
5710 if (ret_val)
5711 goto out;
5712
5713 if (!dev_spec->eee_disable) {
5714 u16 eee_advert;
5715
5716 ret_val =
5717 e1000_read_emi_reg_locked(hw,
5718 I217_EEE_ADVERTISEMENT,
5719 &eee_advert);
5720 if (ret_val)
5721 goto release;
5722
5723 /* Disable LPLU if both link partners support 100BaseT
5724 * EEE and 100Full is advertised on both ends of the
5725 * link, and enable Auto Enable LPI since there will
5726 * be no driver to enable LPI while in Sx.
5727 */
5728 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5729 (dev_spec->eee_lp_ability &
5730 I82579_EEE_100_SUPPORTED) &&
5731 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5732 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5733 E1000_PHY_CTRL_NOND0A_LPLU);
5734
5735 /* Set Auto Enable LPI after link up */
5736 hw->phy.ops.read_reg_locked(hw,
5737 I217_LPI_GPIO_CTRL,
5738 &phy_reg);
5739 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5740 hw->phy.ops.write_reg_locked(hw,
5741 I217_LPI_GPIO_CTRL,
5742 phy_reg);
5743 }
5744 }
5745
5746 /* For i217 Intel Rapid Start Technology support,
5747 * when the system is going into Sx and no manageability engine
5748 * is present, the driver must configure proxy to reset only on
5749 * power good. LPI (Low Power Idle) state must also reset only
5750 * on power good, as well as the MTA (Multicast table array).
5751 * The SMBus release must also be disabled on LCD reset.
5752 */
5753 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5754 E1000_ICH_FWSM_FW_VALID)) {
5755 /* Enable proxy to reset only on power good. */
5756 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5757 &phy_reg);
5758 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5759 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5760 phy_reg);
5761
5762 /* Set bit enable LPI (EEE) to reset only on
5763 * power good.
5764 */
5765 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5766 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5767 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5768
5769 /* Disable the SMB release on LCD reset. */
5770 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5771 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5772 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5773 }
5774
5775 /* Enable MTA to reset for Intel Rapid Start Technology
5776 * Support
5777 */
5778 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5779 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5780 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5781
5782 release:
5783 hw->phy.ops.release(hw);
5784 }
5785 out:
5786 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5787
5788 if (hw->mac.type == e1000_ich8lan)
5789 e1000_gig_downshift_workaround_ich8lan(hw);
5790
5791 if (hw->mac.type >= e1000_pchlan) {
5792 e1000_oem_bits_config_ich8lan(hw, FALSE);
5793
5794 /* Reset PHY to activate OEM bits on 82577/8 */
5795 if (hw->mac.type == e1000_pchlan)
5796 e1000_phy_hw_reset_generic(hw);
5797
5798 ret_val = hw->phy.ops.acquire(hw);
5799 if (ret_val)
5800 return;
5801 e1000_write_smbus_addr(hw);
5802 hw->phy.ops.release(hw);
5803 }
5804
5805 return;
5806 }
5807
5808 /**
5809 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5810 * @hw: pointer to the HW structure
5811 *
5812 * During Sx to S0 transitions on non-managed devices or managed devices
5813 * on which PHY resets are not blocked, if the PHY registers cannot be
5814  * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5815 * the PHY.
5816 * On i217, setup Intel Rapid Start Technology.
5817 **/
5818 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5819 {
5820 s32 ret_val;
5821
5822 DEBUGFUNC("e1000_resume_workarounds_pchlan");
5823 if (hw->mac.type < e1000_pch2lan)
5824 return E1000_SUCCESS;
5825
5826 ret_val = e1000_init_phy_workarounds_pchlan(hw);
5827 if (ret_val) {
5828 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5829 return ret_val;
5830 }
5831
5832 	/* For i217 Intel Rapid Start Technology support, when the system
5833 	 * is transitioning from Sx and no manageability engine is present,
5834 * configure SMBus to restore on reset, disable proxy, and enable
5835 * the reset on MTA (Multicast table array).
5836 */
5837 if (hw->phy.type == e1000_phy_i217) {
5838 u16 phy_reg;
5839
5840 ret_val = hw->phy.ops.acquire(hw);
5841 if (ret_val) {
5842 DEBUGOUT("Failed to setup iRST\n");
5843 return ret_val;
5844 }
5845
5846 /* Clear Auto Enable LPI after link up */
5847 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5848 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5849 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5850
5851 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5852 E1000_ICH_FWSM_FW_VALID)) {
5853 /* Restore clear on SMB if no manageability engine
5854 * is present
5855 */
5856 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5857 &phy_reg);
5858 if (ret_val)
5859 goto release;
5860 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5861 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5862
5863 /* Disable Proxy */
5864 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5865 }
5866 /* Enable reset on MTA */
5867 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5868 &phy_reg);
5869 if (ret_val)
5870 goto release;
5871 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5872 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5873 release:
5874 if (ret_val)
5875 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5876 hw->phy.ops.release(hw);
5877 return ret_val;
5878 }
5879 return E1000_SUCCESS;
5880 }
5881
5882 /**
5883 * e1000_cleanup_led_ich8lan - Restore the default LED operation
5884 * @hw: pointer to the HW structure
5885 *
5886 * Return the LED back to the default configuration.
5887 **/
5888 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5889 {
5890 DEBUGFUNC("e1000_cleanup_led_ich8lan");
5891
5892 if (hw->phy.type == e1000_phy_ife)
5893 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5894 0);
5895
5896 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5897 return E1000_SUCCESS;
5898 }
5899
5900 /**
5901 * e1000_led_on_ich8lan - Turn LEDs on
5902 * @hw: pointer to the HW structure
5903 *
5904 * Turn on the LEDs.
5905 **/
5906 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5907 {
5908 DEBUGFUNC("e1000_led_on_ich8lan");
5909
5910 if (hw->phy.type == e1000_phy_ife)
5911 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5912 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5913
5914 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5915 return E1000_SUCCESS;
5916 }
5917
5918 /**
5919 * e1000_led_off_ich8lan - Turn LEDs off
5920 * @hw: pointer to the HW structure
5921 *
5922 * Turn off the LEDs.
5923 **/
5924 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5925 {
5926 DEBUGFUNC("e1000_led_off_ich8lan");
5927
5928 if (hw->phy.type == e1000_phy_ife)
5929 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5930 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5931
5932 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5933 return E1000_SUCCESS;
5934 }
5935
5936 /**
5937 * e1000_setup_led_pchlan - Configures SW controllable LED
5938 * @hw: pointer to the HW structure
5939 *
5940 * This prepares the SW controllable LED for use.
5941 **/
5942 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5943 {
5944 DEBUGFUNC("e1000_setup_led_pchlan");
5945
5946 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5947 (u16)hw->mac.ledctl_mode1);
5948 }
5949
5950 /**
5951 * e1000_cleanup_led_pchlan - Restore the default LED operation
5952 * @hw: pointer to the HW structure
5953 *
5954 * Return the LED back to the default configuration.
5955 **/
5956 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5957 {
5958 DEBUGFUNC("e1000_cleanup_led_pchlan");
5959
5960 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5961 (u16)hw->mac.ledctl_default);
5962 }
5963
5964 /**
5965 * e1000_led_on_pchlan - Turn LEDs on
5966 * @hw: pointer to the HW structure
5967 *
5968 * Turn on the LEDs.
5969 **/
5970 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5971 {
5972 u16 data = (u16)hw->mac.ledctl_mode2;
5973 u32 i, led;
5974
5975 DEBUGFUNC("e1000_led_on_pchlan");
5976
5977 	/* If no link, then turn LED on by setting the invert bit
5978 	 * for each LED whose mode is "link_up" in ledctl_mode2.
5979 */
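	/* HV_LED_CONFIG packs three LEDs at 5 bits each: 4 mode bits plus
	 * the IVRT (invert) bit, hence the (i * 5) shifts below.
	 */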
5980 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5981 for (i = 0; i < 3; i++) {
5982 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5983 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5984 E1000_LEDCTL_MODE_LINK_UP)
5985 continue;
5986 if (led & E1000_PHY_LED0_IVRT)
5987 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5988 else
5989 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5990 }
5991 }
5992
5993 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5994 }
5995
5996 /**
5997 * e1000_led_off_pchlan - Turn LEDs off
5998 * @hw: pointer to the HW structure
5999 *
6000 * Turn off the LEDs.
6001 **/
6002 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
6003 {
6004 u16 data = (u16)hw->mac.ledctl_mode1;
6005 u32 i, led;
6006
6007 DEBUGFUNC("e1000_led_off_pchlan");
6008
6009 	/* If no link, then turn LED off by clearing the invert bit
6010 	 * for each LED whose mode is "link_up" in ledctl_mode1.
6011 */
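	/* Same 5-bits-per-LED field layout as in e1000_led_on_pchlan(). */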
6012 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
6013 for (i = 0; i < 3; i++) {
6014 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
6015 if ((led & E1000_PHY_LED0_MODE_MASK) !=
6016 E1000_LEDCTL_MODE_LINK_UP)
6017 continue;
6018 if (led & E1000_PHY_LED0_IVRT)
6019 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
6020 else
6021 data |= (E1000_PHY_LED0_IVRT << (i * 5));
6022 }
6023 }
6024
6025 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
6026 }
6027
6028 /**
6029 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
6030 * @hw: pointer to the HW structure
6031 *
6032 * Read appropriate register for the config done bit for completion status
6033 * and configure the PHY through s/w for EEPROM-less parts.
6034 *
6035  * NOTE: some EEPROM-less silicon will fail trying to read the config done
6036  * bit, so only an error is logged and execution continues. If we were to
6037  * return an error, EEPROM-less silicon would not be able to be reset or
6038  * change link.
6039 **/
6040 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
6041 {
6042 s32 ret_val = E1000_SUCCESS;
6043 u32 bank = 0;
6044 u32 status;
6045
6046 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
6047
6048 e1000_get_cfg_done_generic(hw);
6049
6050 /* Wait for indication from h/w that it has completed basic config */
6051 if (hw->mac.type >= e1000_ich10lan) {
6052 e1000_lan_init_done_ich8lan(hw);
6053 } else {
6054 ret_val = e1000_get_auto_rd_done_generic(hw);
6055 if (ret_val) {
6056 /* When auto config read does not complete, do not
6057 * return with an error. This can happen in situations
6058 * where there is no eeprom and prevents getting link.
6059 */
6060 DEBUGOUT("Auto Read Done did not complete\n");
6061 ret_val = E1000_SUCCESS;
6062 }
6063 }
6064
6065 /* Clear PHY Reset Asserted bit */
6066 status = E1000_READ_REG(hw, E1000_STATUS);
6067 if (status & E1000_STATUS_PHYRA)
6068 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
6069 else
6070 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6071
6072 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
6073 if (hw->mac.type <= e1000_ich9lan) {
6074 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6075 (hw->phy.type == e1000_phy_igp_3)) {
6076 e1000_phy_init_script_igp3(hw);
6077 }
6078 } else {
6079 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6080 /* Maybe we should do a basic PHY config */
6081 DEBUGOUT("EEPROM not present\n");
6082 ret_val = -E1000_ERR_CONFIG;
6083 }
6084 }
6085
6086 return ret_val;
6087 }
6088
6089 /**
6090 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6091 * @hw: pointer to the HW structure
6092 *
6093  * In the case of a PHY power-down to save power, to turn off the link during
6094  * a driver unload, or when Wake-on-LAN is not enabled, remove the link.
6095 **/
6096 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6097 {
6098 /* If the management interface is not enabled, then power down */
6099 if (!(hw->mac.ops.check_mng_mode(hw) ||
6100 hw->phy.ops.check_reset_block(hw)))
6101 e1000_power_down_phy_copper(hw);
6102
6103 return;
6104 }
6105
6106 /**
6107 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6108 * @hw: pointer to the HW structure
6109 *
6110 * Clears hardware counters specific to the silicon family and calls
6111 * clear_hw_cntrs_generic to clear all general purpose counters.
6112 **/
6113 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6114 {
6115 u16 phy_data;
6116 s32 ret_val;
6117
6118 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6119
6120 e1000_clear_hw_cntrs_base_generic(hw);
6121
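	/* These MAC statistics registers are clear-on-read; reading them
	 * here zeroes the counters.
	 */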
6122 E1000_READ_REG(hw, E1000_ALGNERRC);
6123 E1000_READ_REG(hw, E1000_RXERRC);
6124 E1000_READ_REG(hw, E1000_TNCRS);
6125 E1000_READ_REG(hw, E1000_CEXTERR);
6126 E1000_READ_REG(hw, E1000_TSCTC);
6127 E1000_READ_REG(hw, E1000_TSCTFC);
6128
6129 E1000_READ_REG(hw, E1000_MGTPRC);
6130 E1000_READ_REG(hw, E1000_MGTPDC);
6131 E1000_READ_REG(hw, E1000_MGTPTC);
6132
6133 E1000_READ_REG(hw, E1000_IAC);
6134 E1000_READ_REG(hw, E1000_ICRXOC);
6135
6136 /* Clear PHY statistics registers */
6137 if ((hw->phy.type == e1000_phy_82578) ||
6138 (hw->phy.type == e1000_phy_82579) ||
6139 (hw->phy.type == e1000_phy_i217) ||
6140 (hw->phy.type == e1000_phy_82577)) {
6141 ret_val = hw->phy.ops.acquire(hw);
6142 if (ret_val)
6143 return;
6144 ret_val = hw->phy.ops.set_page(hw,
6145 HV_STATS_PAGE << IGP_PAGE_SHIFT);
6146 if (ret_val)
6147 goto release;
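		/* Each PHY statistics counter is split across UPPER/LOWER
		 * 16-bit registers; reading both halves clears the counter.
		 */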
6148 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6149 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6150 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6151 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6152 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6153 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6154 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6155 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6156 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6157 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6158 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6159 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6160 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6161 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6162 release:
6163 hw->phy.ops.release(hw);
6164 }
6165 }
6166
6167