1 /******************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2001-2020, Intel Corporation
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Redistributions in binary form must reproduce the above copyright
14 notice, this list of conditions and the following disclaimer in the
15 documentation and/or other materials provided with the distribution.
16
17 3. Neither the name of the Intel Corporation nor the names of its
18 contributors may be used to endorse or promote products derived from
19 this software without specific prior written permission.
20
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE.
32
33 ******************************************************************************/
34
35 /* 82562G 10/100 Network Connection
36 * 82562G-2 10/100 Network Connection
37 * 82562GT 10/100 Network Connection
38 * 82562GT-2 10/100 Network Connection
39 * 82562V 10/100 Network Connection
40 * 82562V-2 10/100 Network Connection
41 * 82566DC-2 Gigabit Network Connection
42 * 82566DC Gigabit Network Connection
43 * 82566DM-2 Gigabit Network Connection
44 * 82566DM Gigabit Network Connection
45 * 82566MC Gigabit Network Connection
46 * 82566MM Gigabit Network Connection
47 * 82567LM Gigabit Network Connection
48 * 82567LF Gigabit Network Connection
49 * 82567V Gigabit Network Connection
50 * 82567LM-2 Gigabit Network Connection
51 * 82567LF-2 Gigabit Network Connection
52 * 82567V-2 Gigabit Network Connection
53 * 82567LF-3 Gigabit Network Connection
54 * 82567LM-3 Gigabit Network Connection
55 * 82567LM-4 Gigabit Network Connection
56 * 82577LM Gigabit Network Connection
57 * 82577LC Gigabit Network Connection
58 * 82578DM Gigabit Network Connection
59 * 82578DC Gigabit Network Connection
60 * 82579LM Gigabit Network Connection
61 * 82579V Gigabit Network Connection
62 * Ethernet Connection I217-LM
63 * Ethernet Connection I217-V
64 * Ethernet Connection I218-V
65 * Ethernet Connection I218-LM
66 * Ethernet Connection (2) I218-LM
67 * Ethernet Connection (2) I218-V
68 * Ethernet Connection (3) I218-LM
69 * Ethernet Connection (3) I218-V
70 */
71
72 #include "e1000_api.h"
73
74 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
75 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
76 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
77 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
78 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
80 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
81 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
82 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
83 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
84 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85 u8 *mc_addr_list,
86 u32 mc_addr_count);
87 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
88 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
89 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
90 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
91 bool active);
92 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
93 bool active);
94 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
95 u16 words, u16 *data);
96 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
97 u16 *data);
98 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
99 u16 words, u16 *data);
100 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
102 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
103 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
104 u16 *data);
105 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
106 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
107 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
108 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
109 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
110 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
111 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
112 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
113 u16 *speed, u16 *duplex);
114 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
115 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
116 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
117 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
118 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
119 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
120 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
121 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
122 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
123 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
124 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
125 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
126 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
127 u32 offset, u8 *data);
128 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
129 u8 size, u16 *data);
130 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
131 u32 *data);
132 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
133 u32 offset, u32 *data);
134 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
135 u32 offset, u32 data);
136 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
137 u32 offset, u32 dword);
138 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
139 u32 offset, u16 *data);
140 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
141 u32 offset, u8 byte);
142 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
143 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
144 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
145 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
146 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
147 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
148 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
149
150 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
151 /* Offset 04h HSFSTS */
152 union ich8_hws_flash_status {
153 struct ich8_hsfsts {
154 u16 flcdone:1; /* bit 0 Flash Cycle Done */
155 u16 flcerr:1; /* bit 1 Flash Cycle Error */
156 u16 dael:1; /* bit 2 Direct Access error Log */
157 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
158 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
159 		u16 reserved1:2; /* bit 7:6 Reserved */
160 		u16 reserved2:6; /* bit 13:8 Reserved */
161 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
162 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
163 } hsf_status;
164 u16 regval;
165 };
166
167 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
168 /* Offset 06h FLCTL */
169 union ich8_hws_flash_ctrl {
170 struct ich8_hsflctl {
171 u16 flcgo:1; /* 0 Flash Cycle Go */
172 u16 flcycle:2; /* 2:1 Flash Cycle */
173 u16 reserved:5; /* 7:3 Reserved */
174 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
175 u16 flockdn:6; /* 15:10 Reserved */
176 } hsf_ctrl;
177 u16 regval;
178 };
179
180 /* ICH Flash Region Access Permissions */
181 union ich8_hws_flash_regacc {
182 struct ich8_flracc {
183 u32 grra:8; /* 0:7 GbE region Read Access */
184 u32 grwa:8; /* 8:15 GbE region Write Access */
185 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
186 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
187 } hsf_flregacc;
188 	u32 regval;
189 };
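/* Usage sketch (illustrative only, mirroring the flash hardware-sequencing
 * routines later in this file): the register is read into regval, and the
 * individual fields are then tested or set through the bitfield view:
 *
 *	union ich8_hws_flash_status hsfsts;
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcinprog)
 *		; (wait for the current flash cycle to complete)
 */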
190
191 /**
192 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
193 * @hw: pointer to the HW structure
194 *
195 * Test access to the PHY registers by reading the PHY ID registers. If
196  * the PHY ID is already known (e.g. on the resume path), compare it with the
197  * known ID; otherwise assume the PHY ID just read is correct if it is valid.
198 *
199 * Assumes the sw/fw/hw semaphore is already acquired.
200 **/
201 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
202 {
203 u16 phy_reg = 0;
204 u32 phy_id = 0;
205 s32 ret_val = 0;
206 u16 retry_count;
207 u32 mac_reg = 0;
208
209 for (retry_count = 0; retry_count < 2; retry_count++) {
210 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
211 if (ret_val || (phy_reg == 0xFFFF))
212 continue;
213 phy_id = (u32)(phy_reg << 16);
214
215 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
216 if (ret_val || (phy_reg == 0xFFFF)) {
217 phy_id = 0;
218 continue;
219 }
220 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
221 break;
222 }
223
224 if (hw->phy.id) {
225 if (hw->phy.id == phy_id)
226 goto out;
227 } else if (phy_id) {
228 hw->phy.id = phy_id;
229 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
230 goto out;
231 }
232
233 /* In case the PHY needs to be in mdio slow mode,
234 * set slow mode and try to get the PHY id again.
235 */
236 if (hw->mac.type < e1000_pch_lpt) {
237 hw->phy.ops.release(hw);
238 ret_val = e1000_set_mdio_slow_mode_hv(hw);
239 if (!ret_val)
240 ret_val = e1000_get_phy_id(hw);
241 hw->phy.ops.acquire(hw);
242 }
243
244 if (ret_val)
245 return false;
246 out:
247 if (hw->mac.type >= e1000_pch_lpt) {
248 /* Only unforce SMBus if ME is not active */
249 if (!(E1000_READ_REG(hw, E1000_FWSM) &
250 E1000_ICH_FWSM_FW_VALID)) {
251 /* Unforce SMBus mode in PHY */
252 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
253 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
254 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255
256 /* Unforce SMBus mode in MAC */
257 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
258 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
259 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
260 }
261 }
262
263 return true;
264 }
265
266 /**
267 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
268 * @hw: pointer to the HW structure
269 *
270 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
271 * used to reset the PHY to a quiescent state when necessary.
272 **/
273 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
274 {
275 u32 mac_reg;
276
277 DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
278
279 /* Set Phy Config Counter to 50msec */
280 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
281 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
282 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
283 E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
284
285 /* Toggle LANPHYPC Value bit */
286 mac_reg = E1000_READ_REG(hw, E1000_CTRL);
287 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
288 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
289 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
290 E1000_WRITE_FLUSH(hw);
291 msec_delay(1);
292 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
294 E1000_WRITE_FLUSH(hw);
295
296 if (hw->mac.type < e1000_pch_lpt) {
297 msec_delay(50);
298 } else {
299 u16 count = 20;
300
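		/* Poll in 5 ms steps (up to ~100 ms in total) for the
		 * LANPHYPC cycle-done indication, then allow a final
		 * 30 ms settle delay.
		 */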
301 do {
302 msec_delay(5);
303 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
304 E1000_CTRL_EXT_LPCD) && count--);
305
306 msec_delay(30);
307 }
308 }
309
310 /**
311 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
312 * @hw: pointer to the HW structure
313 *
314 * Workarounds/flow necessary for PHY initialization during driver load
315 * and resume paths.
316 **/
317 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
318 {
319 u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
320 s32 ret_val;
321
322 DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
323
324 /* Gate automatic PHY configuration by hardware on managed and
325 * non-managed 82579 and newer adapters.
326 */
327 e1000_gate_hw_phy_config_ich8lan(hw, true);
328
329 /* It is not possible to be certain of the current state of ULP
330 * so forcibly disable it.
331 */
332 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
333 ret_val = e1000_disable_ulp_lpt_lp(hw, true);
334 if (ret_val)
335 ERROR_REPORT("Failed to disable ULP\n");
336
337 ret_val = hw->phy.ops.acquire(hw);
338 if (ret_val) {
339 DEBUGOUT("Failed to initialize PHY flow\n");
340 goto out;
341 }
342
343 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
344 * inaccessible and resetting the PHY is not blocked, toggle the
345 * LANPHYPC Value bit to force the interconnect to PCIe mode.
346 */
347 switch (hw->mac.type) {
348 case e1000_pch_lpt:
349 case e1000_pch_spt:
350 case e1000_pch_cnp:
351 case e1000_pch_tgp:
352 case e1000_pch_adp:
353 case e1000_pch_mtp:
354 case e1000_pch_ptp:
355 if (e1000_phy_is_accessible_pchlan(hw))
356 break;
357
358 /* Before toggling LANPHYPC, see if PHY is accessible by
359 * forcing MAC to SMBus mode first.
360 */
361 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
362 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
363 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
364
365 		/* Wait 50 milliseconds for the MAC to finish any retries
366 		 * it might be performing from previous attempts to
367 		 * acknowledge any PHY read requests.
368 */
369 msec_delay(50);
370
371 /* FALLTHROUGH */
372 case e1000_pch2lan:
373 if (e1000_phy_is_accessible_pchlan(hw))
374 break;
375
376 /* FALLTHROUGH */
377 case e1000_pchlan:
378 if ((hw->mac.type == e1000_pchlan) &&
379 (fwsm & E1000_ICH_FWSM_FW_VALID))
380 break;
381
382 if (hw->phy.ops.check_reset_block(hw)) {
383 DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
384 ret_val = -E1000_ERR_PHY;
385 break;
386 }
387
388 /* Toggle LANPHYPC Value bit */
389 e1000_toggle_lanphypc_pch_lpt(hw);
390 if (hw->mac.type >= e1000_pch_lpt) {
391 if (e1000_phy_is_accessible_pchlan(hw))
392 break;
393
394 /* Toggling LANPHYPC brings the PHY out of SMBus mode
395 * so ensure that the MAC is also out of SMBus mode
396 */
397 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
398 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
399 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
400
401 if (e1000_phy_is_accessible_pchlan(hw))
402 break;
403
404 ret_val = -E1000_ERR_PHY;
405 }
406 break;
407 default:
408 break;
409 }
410
411 hw->phy.ops.release(hw);
412 if (!ret_val) {
413
414 /* Check to see if able to reset PHY. Print error if not */
415 if (hw->phy.ops.check_reset_block(hw)) {
416 ERROR_REPORT("Reset blocked by ME\n");
417 goto out;
418 }
419
420 		/* Reset the PHY before any access to it. Doing so ensures
421 * that the PHY is in a known good state before we read/write
422 * PHY registers. The generic reset is sufficient here,
423 * because we haven't determined the PHY type yet.
424 */
425 ret_val = e1000_phy_hw_reset_generic(hw);
426 if (ret_val)
427 goto out;
428
429 		/* On a successful reset, we may need to wait for the PHY
430 * to quiesce to an accessible state before returning control
431 * to the calling function. If the PHY does not quiesce, then
432 * return E1000E_BLK_PHY_RESET, as this is the condition that
433 * the PHY is in.
434 */
435 ret_val = hw->phy.ops.check_reset_block(hw);
436 if (ret_val)
437 ERROR_REPORT("ME blocked access to PHY after reset\n");
438 }
439
440 out:
441 /* Ungate automatic PHY configuration on non-managed 82579 */
442 if ((hw->mac.type == e1000_pch2lan) &&
443 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
444 msec_delay(10);
445 e1000_gate_hw_phy_config_ich8lan(hw, false);
446 }
447
448 return ret_val;
449 }
450
451 /**
452 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
453 * @hw: pointer to the HW structure
454 *
455 * Initialize family-specific PHY parameters and function pointers.
456 **/
457 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
458 {
459 struct e1000_phy_info *phy = &hw->phy;
460 s32 ret_val;
461
462 DEBUGFUNC("e1000_init_phy_params_pchlan");
463
464 phy->addr = 1;
465 phy->reset_delay_us = 100;
466
467 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
468 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
469 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
470 phy->ops.set_page = e1000_set_page_igp;
471 phy->ops.read_reg = e1000_read_phy_reg_hv;
472 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
473 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
474 phy->ops.release = e1000_release_swflag_ich8lan;
475 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
476 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
477 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
478 phy->ops.write_reg = e1000_write_phy_reg_hv;
479 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
480 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
481 phy->ops.power_up = e1000_power_up_phy_copper;
482 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
483 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
484
485 phy->id = e1000_phy_unknown;
486
487 ret_val = e1000_init_phy_workarounds_pchlan(hw);
488 if (ret_val)
489 return ret_val;
490
491 if (phy->id == e1000_phy_unknown)
492 switch (hw->mac.type) {
493 default:
494 ret_val = e1000_get_phy_id(hw);
495 if (ret_val)
496 return ret_val;
497 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
498 break;
499 /* FALLTHROUGH */
500 case e1000_pch2lan:
501 case e1000_pch_lpt:
502 case e1000_pch_spt:
503 case e1000_pch_cnp:
504 case e1000_pch_tgp:
505 case e1000_pch_adp:
506 case e1000_pch_mtp:
507 case e1000_pch_ptp:
508 /* In case the PHY needs to be in mdio slow mode,
509 * set slow mode and try to get the PHY id again.
510 */
511 ret_val = e1000_set_mdio_slow_mode_hv(hw);
512 if (ret_val)
513 return ret_val;
514 ret_val = e1000_get_phy_id(hw);
515 if (ret_val)
516 return ret_val;
517 break;
518 }
519 phy->type = e1000_get_phy_type_from_id(phy->id);
520
521 switch (phy->type) {
522 case e1000_phy_82577:
523 case e1000_phy_82579:
524 case e1000_phy_i217:
525 phy->ops.check_polarity = e1000_check_polarity_82577;
526 phy->ops.force_speed_duplex =
527 e1000_phy_force_speed_duplex_82577;
528 phy->ops.get_cable_length = e1000_get_cable_length_82577;
529 phy->ops.get_info = e1000_get_phy_info_82577;
530 phy->ops.commit = e1000_phy_sw_reset_generic;
531 break;
532 case e1000_phy_82578:
533 phy->ops.check_polarity = e1000_check_polarity_m88;
534 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
535 phy->ops.get_cable_length = e1000_get_cable_length_m88;
536 phy->ops.get_info = e1000_get_phy_info_m88;
537 break;
538 default:
539 ret_val = -E1000_ERR_PHY;
540 break;
541 }
542
543 return ret_val;
544 }
545
546 /**
547 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
548 * @hw: pointer to the HW structure
549 *
550 * Initialize family-specific PHY parameters and function pointers.
551 **/
552 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
553 {
554 struct e1000_phy_info *phy = &hw->phy;
555 s32 ret_val;
556 u16 i = 0;
557
558 DEBUGFUNC("e1000_init_phy_params_ich8lan");
559
560 phy->addr = 1;
561 phy->reset_delay_us = 100;
562
563 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
564 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
565 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
566 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
567 phy->ops.read_reg = e1000_read_phy_reg_igp;
568 phy->ops.release = e1000_release_swflag_ich8lan;
569 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
570 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
571 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
572 phy->ops.write_reg = e1000_write_phy_reg_igp;
573 phy->ops.power_up = e1000_power_up_phy_copper;
574 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
575
576 /* We may need to do this twice - once for IGP and if that fails,
577 * we'll set BM func pointers and try again
578 */
579 ret_val = e1000_determine_phy_address(hw);
580 if (ret_val) {
581 phy->ops.write_reg = e1000_write_phy_reg_bm;
582 phy->ops.read_reg = e1000_read_phy_reg_bm;
583 ret_val = e1000_determine_phy_address(hw);
584 if (ret_val) {
585 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
586 return ret_val;
587 }
588 }
589
590 phy->id = 0;
591 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
592 (i++ < 100)) {
593 msec_delay(1);
594 ret_val = e1000_get_phy_id(hw);
595 if (ret_val)
596 return ret_val;
597 }
598
599 /* Verify phy id */
600 switch (phy->id) {
601 case IGP03E1000_E_PHY_ID:
602 phy->type = e1000_phy_igp_3;
603 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
604 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
605 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
606 phy->ops.get_info = e1000_get_phy_info_igp;
607 phy->ops.check_polarity = e1000_check_polarity_igp;
608 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
609 break;
610 case IFE_E_PHY_ID:
611 case IFE_PLUS_E_PHY_ID:
612 case IFE_C_E_PHY_ID:
613 phy->type = e1000_phy_ife;
614 phy->autoneg_mask = E1000_ALL_NOT_GIG;
615 phy->ops.get_info = e1000_get_phy_info_ife;
616 phy->ops.check_polarity = e1000_check_polarity_ife;
617 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
618 break;
619 case BME1000_E_PHY_ID:
620 phy->type = e1000_phy_bm;
621 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
622 phy->ops.read_reg = e1000_read_phy_reg_bm;
623 phy->ops.write_reg = e1000_write_phy_reg_bm;
624 phy->ops.commit = e1000_phy_sw_reset_generic;
625 phy->ops.get_info = e1000_get_phy_info_m88;
626 phy->ops.check_polarity = e1000_check_polarity_m88;
627 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
628 break;
629 default:
630 return -E1000_ERR_PHY;
631 break;
632 }
633
634 return E1000_SUCCESS;
635 }
636
637 /**
638 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
639 * @hw: pointer to the HW structure
640 *
641 * Initialize family-specific NVM parameters and function
642 * pointers.
643 **/
644 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
645 {
646 struct e1000_nvm_info *nvm = &hw->nvm;
647 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
648 u32 gfpreg, sector_base_addr, sector_end_addr;
649 u16 i;
650 u32 nvm_size;
651
652 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
653
654 nvm->type = e1000_nvm_flash_sw;
655
656 if (hw->mac.type >= e1000_pch_spt) {
657 /* in SPT, gfpreg doesn't exist. NVM size is taken from the
658 * STRAP register. This is because in SPT the GbE Flash region
659 * is no longer accessed through the flash registers. Instead,
660 * the mechanism has changed, and the Flash region access
661 * registers are now implemented in GbE memory space.
662 */
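		/* Worked example (illustrative; assumes NVM_SIZE_MULTIPLIER
		 * is 4 KB): a STRAP NVM-size field of 7 yields
		 * (7 + 1) * 4096 = 32 KB of NVM, i.e. a 16 KB bank, or
		 * 8K 16-bit words after the adjustments below.
		 */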
663 nvm->flash_base_addr = 0;
664 nvm_size =
665 (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
666 * NVM_SIZE_MULTIPLIER;
667 nvm->flash_bank_size = nvm_size / 2;
668 /* Adjust to word count */
669 nvm->flash_bank_size /= sizeof(u16);
670 /* Set the base address for flash register access */
671 hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
672 } else {
673 /* Can't read flash registers if register set isn't mapped. */
674 if (!hw->flash_address) {
675 DEBUGOUT("ERROR: Flash registers not mapped\n");
676 return -E1000_ERR_CONFIG;
677 }
678
679 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
680
681 		/* sector_X_addr is a "sector"-aligned address (4096 bytes).
682 * Add 1 to sector_end_addr since this sector is included in
683 * the overall size.
684 */
685 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
686 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
687
688 /* flash_base_addr is byte-aligned */
689 nvm->flash_base_addr = sector_base_addr
690 << FLASH_SECTOR_ADDR_SHIFT;
691
692 /* find total size of the NVM, then cut in half since the total
693 * size represents two separate NVM banks.
694 */
695 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
696 << FLASH_SECTOR_ADDR_SHIFT);
697 nvm->flash_bank_size /= 2;
698 /* Adjust to word count */
699 nvm->flash_bank_size /= sizeof(u16);
700 }
701
702 nvm->word_size = E1000_SHADOW_RAM_WORDS;
703
704 /* Clear shadow ram */
705 for (i = 0; i < nvm->word_size; i++) {
706 dev_spec->shadow_ram[i].modified = false;
707 dev_spec->shadow_ram[i].value = 0xFFFF;
708 }
709
710 /* Function Pointers */
711 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
712 nvm->ops.release = e1000_release_nvm_ich8lan;
713 if (hw->mac.type >= e1000_pch_spt) {
714 nvm->ops.read = e1000_read_nvm_spt;
715 nvm->ops.update = e1000_update_nvm_checksum_spt;
716 } else {
717 nvm->ops.read = e1000_read_nvm_ich8lan;
718 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
719 }
720 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
721 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
722 nvm->ops.write = e1000_write_nvm_ich8lan;
723
724 return E1000_SUCCESS;
725 }
726
727 /**
728 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
729 * @hw: pointer to the HW structure
730 *
731 * Initialize family-specific MAC parameters and function
732 * pointers.
733 **/
734 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
735 {
736 struct e1000_mac_info *mac = &hw->mac;
737
738 DEBUGFUNC("e1000_init_mac_params_ich8lan");
739
740 /* Set media type function pointer */
741 hw->phy.media_type = e1000_media_type_copper;
742
743 /* Set mta register count */
744 mac->mta_reg_count = 32;
745 /* Set rar entry count */
746 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
747 if (mac->type == e1000_ich8lan)
748 mac->rar_entry_count--;
749 /* Set if part includes ASF firmware */
750 mac->asf_firmware_present = true;
751 /* FWSM register */
752 mac->has_fwsm = true;
753 /* ARC subsystem not supported */
754 mac->arc_subsystem_valid = false;
755 /* Adaptive IFS supported */
756 mac->adaptive_ifs = true;
757
758 /* Function pointers */
759
760 /* bus type/speed/width */
761 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
762 /* function id */
763 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
764 /* reset */
765 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
766 /* hw initialization */
767 mac->ops.init_hw = e1000_init_hw_ich8lan;
768 /* link setup */
769 mac->ops.setup_link = e1000_setup_link_ich8lan;
770 /* physical interface setup */
771 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
772 /* check for link */
773 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
774 /* link info */
775 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
776 /* multicast address update */
777 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
778 /* clear hardware counters */
779 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
780
781 /* LED and other operations */
782 switch (mac->type) {
783 case e1000_ich8lan:
784 case e1000_ich9lan:
785 case e1000_ich10lan:
786 /* check management mode */
787 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
788 /* ID LED init */
789 mac->ops.id_led_init = e1000_id_led_init_generic;
790 /* blink LED */
791 mac->ops.blink_led = e1000_blink_led_generic;
792 /* setup LED */
793 mac->ops.setup_led = e1000_setup_led_generic;
794 /* cleanup LED */
795 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
796 /* turn on/off LED */
797 mac->ops.led_on = e1000_led_on_ich8lan;
798 mac->ops.led_off = e1000_led_off_ich8lan;
799 break;
800 case e1000_pch2lan:
801 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
802 mac->ops.rar_set = e1000_rar_set_pch2lan;
803 /* FALLTHROUGH */
804 case e1000_pch_lpt:
805 case e1000_pch_spt:
806 case e1000_pch_cnp:
807 case e1000_pch_tgp:
808 case e1000_pch_adp:
809 case e1000_pch_mtp:
810 case e1000_pch_ptp:
811 /* multicast address update for pch2 */
812 mac->ops.update_mc_addr_list =
813 e1000_update_mc_addr_list_pch2lan;
814 /* FALLTHROUGH */
815 case e1000_pchlan:
816 /* check management mode */
817 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
818 /* ID LED init */
819 mac->ops.id_led_init = e1000_id_led_init_pchlan;
820 /* setup LED */
821 mac->ops.setup_led = e1000_setup_led_pchlan;
822 /* cleanup LED */
823 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
824 /* turn on/off LED */
825 mac->ops.led_on = e1000_led_on_pchlan;
826 mac->ops.led_off = e1000_led_off_pchlan;
827 break;
828 default:
829 break;
830 }
831
832 if (mac->type >= e1000_pch_lpt) {
833 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
834 mac->ops.rar_set = e1000_rar_set_pch_lpt;
835 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
836 mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
837 }
838
839 /* Enable PCS Lock-loss workaround for ICH8 */
840 if (mac->type == e1000_ich8lan)
841 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
842
843 return E1000_SUCCESS;
844 }
845
846 /**
847 * __e1000_access_emi_reg_locked - Read/write EMI register
848 * @hw: pointer to the HW structure
849 * @address: EMI address to program
850 * @data: pointer to value to read/write from/to the EMI address
851 * @read: boolean flag to indicate read or write
852 *
853 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
854 **/
855 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
856 u16 *data, bool read)
857 {
858 s32 ret_val;
859
860 DEBUGFUNC("__e1000_access_emi_reg_locked");
861
862 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
863 if (ret_val)
864 return ret_val;
865
866 if (read)
867 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
868 data);
869 else
870 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
871 *data);
872
873 return ret_val;
874 }
875
876 /**
877 * e1000_read_emi_reg_locked - Read Extended Management Interface register
878 * @hw: pointer to the HW structure
879 * @addr: EMI address to program
880 * @data: value to be read from the EMI address
881 *
882 * Assumes the SW/FW/HW Semaphore is already acquired.
883 **/
884 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
885 {
886 DEBUGFUNC("e1000_read_emi_reg_locked");
887
888 return __e1000_access_emi_reg_locked(hw, addr, data, true);
889 }
890
891 /**
892 * e1000_write_emi_reg_locked - Write Extended Management Interface register
893 * @hw: pointer to the HW structure
894 * @addr: EMI address to program
895 * @data: value to be written to the EMI address
896 *
897 * Assumes the SW/FW/HW Semaphore is already acquired.
898 **/
899 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
900 {
901 	DEBUGFUNC("e1000_write_emi_reg_locked");
902
903 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
904 }
905
906 /**
907 * e1000_set_eee_pchlan - Enable/disable EEE support
908 * @hw: pointer to the HW structure
909 *
910 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
911 * the link and the EEE capabilities of the link partner. The LPI Control
912 * register bits will remain set only if/when link is up.
913 *
914 * EEE LPI must not be asserted earlier than one second after link is up.
915  * On 82579, EEE LPI should not be enabled until that second has elapsed,
916  * otherwise there can be link issues with some switches. Other devices can
917  * have EEE LPI enabled immediately upon link up since they have a hardware
918  * timer which prevents LPI from being asserted too early.
919 **/
920 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
921 {
922 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
923 s32 ret_val;
924 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
925
926 DEBUGFUNC("e1000_set_eee_pchlan");
927
928 switch (hw->phy.type) {
929 case e1000_phy_82579:
930 lpa = I82579_EEE_LP_ABILITY;
931 pcs_status = I82579_EEE_PCS_STATUS;
932 adv_addr = I82579_EEE_ADVERTISEMENT;
933 break;
934 case e1000_phy_i217:
935 lpa = I217_EEE_LP_ABILITY;
936 pcs_status = I217_EEE_PCS_STATUS;
937 adv_addr = I217_EEE_ADVERTISEMENT;
938 break;
939 default:
940 return E1000_SUCCESS;
941 }
942
943 ret_val = hw->phy.ops.acquire(hw);
944 if (ret_val)
945 return ret_val;
946
947 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
948 if (ret_val)
949 goto release;
950
951 /* Clear bits that enable EEE in various speeds */
952 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
953
954 /* Enable EEE if not disabled by user */
955 if (!dev_spec->eee_disable) {
956 /* Save off link partner's EEE ability */
957 ret_val = e1000_read_emi_reg_locked(hw, lpa,
958 &dev_spec->eee_lp_ability);
959 if (ret_val)
960 goto release;
961
962 /* Read EEE advertisement */
963 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
964 if (ret_val)
965 goto release;
966
967 /* Enable EEE only for speeds in which the link partner is
968 * EEE capable and for which we advertise EEE.
969 */
970 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
971 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
972
973 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
974 hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
975 if (data & NWAY_LPAR_100TX_FD_CAPS)
976 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
977 else
978 /* EEE is not supported in 100Half, so ignore
979 * partner's EEE in 100 ability if full-duplex
980 * is not advertised.
981 */
982 dev_spec->eee_lp_ability &=
983 ~I82579_EEE_100_SUPPORTED;
984 }
985 }
986
987 if (hw->phy.type == e1000_phy_82579) {
988 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
989 &data);
990 if (ret_val)
991 goto release;
992
993 data &= ~I82579_LPI_100_PLL_SHUT;
994 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
995 data);
996 }
997
998 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
999 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
1000 if (ret_val)
1001 goto release;
1002
1003 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
1004 release:
1005 hw->phy.ops.release(hw);
1006
1007 return ret_val;
1008 }
1009
1010 /**
1011 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
1012 * @hw: pointer to the HW structure
1013 * @link: link up bool flag
1014 *
1015 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1016 * preventing further DMA write requests. Workaround the issue by disabling
1017  * the de-assertion of the clock request when in 1Gbps mode.
1018 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1019 * speeds in order to avoid Tx hangs.
1020 **/
1021 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1022 {
1023 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1024 u32 status = E1000_READ_REG(hw, E1000_STATUS);
1025 s32 ret_val = E1000_SUCCESS;
1026 u16 reg;
1027
1028 if (link && (status & E1000_STATUS_SPEED_1000)) {
1029 ret_val = hw->phy.ops.acquire(hw);
1030 if (ret_val)
1031 return ret_val;
1032
1033 ret_val =
1034 e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1035 					   &reg);
1036 if (ret_val)
1037 goto release;
1038
1039 ret_val =
1040 e1000_write_kmrn_reg_locked(hw,
1041 E1000_KMRNCTRLSTA_K1_CONFIG,
1042 reg &
1043 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1044 if (ret_val)
1045 goto release;
1046
1047 usec_delay(10);
1048
1049 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1050 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1051
1052 ret_val =
1053 e1000_write_kmrn_reg_locked(hw,
1054 E1000_KMRNCTRLSTA_K1_CONFIG,
1055 reg);
1056 release:
1057 hw->phy.ops.release(hw);
1058 } else {
1059 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1060 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1061
1062 if ((hw->phy.revision > 5) || !link ||
1063 ((status & E1000_STATUS_SPEED_100) &&
1064 (status & E1000_STATUS_FD)))
1065 goto update_fextnvm6;
1066
1067 		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1068 if (ret_val)
1069 return ret_val;
1070
1071 /* Clear link status transmit timeout */
1072 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1073
1074 if (status & E1000_STATUS_SPEED_100) {
1075 /* Set inband Tx timeout to 5x10us for 100Half */
1076 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1077
1078 /* Do not extend the K1 entry latency for 100Half */
1079 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1080 } else {
1081 /* Set inband Tx timeout to 50x10us for 10Full/Half */
1082 reg |= 50 <<
1083 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1084
1085 /* Extend the K1 entry latency for 10 Mbps */
1086 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1087 }
1088
1089 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1090 if (ret_val)
1091 return ret_val;
1092
1093 update_fextnvm6:
1094 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1095 }
1096
1097 return ret_val;
1098 }
1099
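/**
 * e1000_ltr2ns - convert a Latency Tolerance Reporting value to nanoseconds
 * @ltr: LTR value (10-bit latency value plus 3-bit scale)
 *
 * Each scale step multiplies the latency value by 2^5, i.e. the latency in
 * nsec is value * 2^(5 * scale).  For example (illustrative only), a value
 * of 3 with a scale of 2 decodes to 3 * 2^10 = 3072 ns.
 **/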
1100 static u64 e1000_ltr2ns(u16 ltr)
1101 {
1102 u32 value, scale;
1103
1104 /* Determine the latency in nsec based on the LTR value & scale */
1105 value = ltr & E1000_LTRV_VALUE_MASK;
1106 scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1107
1108 return value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
1109 }
1110
1111 /**
1112 * e1000_platform_pm_pch_lpt - Set platform power management values
1113 * @hw: pointer to the HW structure
1114 * @link: bool indicating link status
1115 *
1116 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1117 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1118 * when link is up (which must not exceed the maximum latency supported
1119 * by the platform), otherwise specify there is no LTR requirement.
1120 * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
1121 * latencies in the LTR Extended Capability Structure in the PCIe Extended
1122 * Capability register set, on this device LTR is set by writing the
1123 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1124  * setting the SEND bit to send an Intel On-chip System Fabric sideband
1125  * (IOSF-SB) message to the PMC.
1126 *
1127 * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1128 * high-water mark.
1129 **/
1130 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1131 {
1132 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1133 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1134 u16 lat_enc = 0; /* latency encoded */
1135 s32 obff_hwm = 0;
1136
1137 DEBUGFUNC("e1000_platform_pm_pch_lpt");
1138
1139 if (link) {
1140 u16 speed, duplex, scale = 0;
1141 u16 max_snoop, max_nosnoop;
1142 u16 max_ltr_enc; /* max LTR latency encoded */
1143 s64 lat_ns;
1144 s64 value;
1145 u32 rxa;
1146
1147 if (!hw->mac.max_frame_size) {
1148 DEBUGOUT("max_frame_size not set.\n");
1149 return -E1000_ERR_CONFIG;
1150 }
1151
1152 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1153 if (!speed) {
1154 DEBUGOUT("Speed not set.\n");
1155 return -E1000_ERR_CONFIG;
1156 }
1157
1158 /* Rx Packet Buffer Allocation size (KB) */
1159 rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1160
1161 /* Determine the maximum latency tolerated by the device.
1162 *
1163 * Per the PCIe spec, the tolerated latencies are encoded as
1164 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1165 * a 10-bit value (0-1023) to provide a range from 1 ns to
1166 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
1167 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1168 */
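		/* Worked example (illustrative): rxa = 24 KB, a 1522-byte
		 * max frame and a 1000 Mbps link give
		 * lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000
		 *        = 172256 ns, which the loop below encodes as
		 * value 169, scale 2 (169 * 2^10 ns, ~173 usec).
		 */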
1169 lat_ns = ((s64)rxa * 1024 -
1170 (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1171 if (lat_ns < 0)
1172 lat_ns = 0;
1173 else
1174 lat_ns /= speed;
1175 value = lat_ns;
1176
1177 while (value > E1000_LTRV_VALUE_MASK) {
1178 scale++;
1179 value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1180 }
1181 if (scale > E1000_LTRV_SCALE_MAX) {
1182 DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1183 return -E1000_ERR_CONFIG;
1184 }
1185 lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1186
1187 /* Determine the maximum latency tolerated by the platform */
1188 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1189 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1190 max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1191
1192 if (lat_enc > max_ltr_enc) {
1193 lat_enc = max_ltr_enc;
1194 lat_ns = e1000_ltr2ns(max_ltr_enc);
1195 }
1196
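		/* Convert the tolerated latency back into the amount of Rx
		 * buffer (in KB, approximating 1 KB as 1000 bytes) that
		 * would fill during that time at line rate; the OBFF
		 * high-water mark is whatever remains of rxa.
		 */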
1197 if (lat_ns) {
1198 lat_ns *= speed * 1000;
1199 lat_ns /= 8;
1200 lat_ns /= 1000000000;
1201 obff_hwm = (s32)(rxa - lat_ns);
1202 }
1203 if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1204 DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1205 return -E1000_ERR_CONFIG;
1206 }
1207 }
1208
1209 /* Set Snoop and No-Snoop latencies the same */
1210 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1211 E1000_WRITE_REG(hw, E1000_LTRV, reg);
1212
1213 /* Set OBFF high water mark */
1214 reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1215 reg |= obff_hwm;
1216 E1000_WRITE_REG(hw, E1000_SVT, reg);
1217
1218 /* Enable OBFF */
1219 reg = E1000_READ_REG(hw, E1000_SVCR);
1220 reg |= E1000_SVCR_OFF_EN;
1221 /* Always unblock interrupts to the CPU even when the system is
1222 * in OBFF mode. This ensures that small round-robin traffic
1223 * (like ping) does not get dropped or experience long latency.
1224 */
1225 reg |= E1000_SVCR_OFF_MASKINT;
1226 E1000_WRITE_REG(hw, E1000_SVCR, reg);
1227
1228 return E1000_SUCCESS;
1229 }
1230
1231 /**
1232 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1233 * @hw: pointer to the HW structure
1234 * @itr: interrupt throttling rate
1235 *
1236 * Configure OBFF with the updated interrupt rate.
1237 **/
1238 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1239 {
1240 u32 svcr;
1241 s32 timer;
1242
1243 DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1244
1245 /* Convert ITR value into microseconds for OBFF timer */
1246 timer = itr & E1000_ITR_MASK;
1247 timer = (timer * E1000_ITR_MULT) / 1000;
1248
1249 if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1250 DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1251 return -E1000_ERR_CONFIG;
1252 }
1253
1254 svcr = E1000_READ_REG(hw, E1000_SVCR);
1255 svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1256 svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1257 E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1258
1259 return E1000_SUCCESS;
1260 }
1261
1262 /**
1263 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1264 * @hw: pointer to the HW structure
1265 * @to_sx: boolean indicating a system power state transition to Sx
1266 *
1267 * When link is down, configure ULP mode to significantly reduce the power
1268 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
1269 * ME firmware to start the ULP configuration. If not on an ME enabled
1270 * system, configure the ULP mode by software.
1271  **/
1272 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1273 {
1274 u32 mac_reg;
1275 s32 ret_val = E1000_SUCCESS;
1276 u16 phy_reg;
1277 u16 oem_reg = 0;
1278
1279 if ((hw->mac.type < e1000_pch_lpt) ||
1280 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1281 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1282 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1283 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1284 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1285 return 0;
1286
1287 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1288 /* Request ME configure ULP mode in the PHY */
1289 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1290 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1291 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1292
1293 goto out;
1294 }
1295
1296 if (!to_sx) {
1297 int i = 0;
1298 /* Poll up to 5 seconds for Cable Disconnected indication */
1299 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1300 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1301 /* Bail if link is re-acquired */
1302 if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1303 return -E1000_ERR_PHY;
1304 if (i++ == 100)
1305 break;
1306
1307 msec_delay(50);
1308 }
1309 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1310 (E1000_READ_REG(hw, E1000_FEXT) &
1311 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1312 i * 50);
1313 if (!(E1000_READ_REG(hw, E1000_FEXT) &
1314 E1000_FEXT_PHY_CABLE_DISCONNECTED))
1315 return 0;
1316 }
1317
1318 ret_val = hw->phy.ops.acquire(hw);
1319 if (ret_val)
1320 goto out;
1321
1322 /* Force SMBus mode in PHY */
1323 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1324 if (ret_val)
1325 goto release;
1326 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1327 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1328
1329 /* Force SMBus mode in MAC */
1330 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1331 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1332 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1333
1334 	/* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
1335 * LPLU and disable Gig speed when entering ULP
1336 */
1337 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1338 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1339 &oem_reg);
1340 if (ret_val)
1341 goto release;
1342
1343 phy_reg = oem_reg;
1344 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1345
1346 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1347 phy_reg);
1348
1349 if (ret_val)
1350 goto release;
1351 }
1352
1353 /* Set Inband ULP Exit, Reset to SMBus mode and
1354 * Disable SMBus Release on PERST# in PHY
1355 */
1356 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1357 if (ret_val)
1358 goto release;
1359 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1360 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1361 if (to_sx) {
1362 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1363 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1364 else
1365 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1366
1367 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1368 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1369 } else {
1370 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1371 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1372 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1373 }
1374 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1375
1376 /* Set Disable SMBus Release on PERST# in MAC */
1377 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1378 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1379 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1380
1381 /* Commit ULP changes in PHY by starting auto ULP configuration */
1382 phy_reg |= I218_ULP_CONFIG1_START;
1383 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1384
1385 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1386 to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1387 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1388 oem_reg);
1389 if (ret_val)
1390 goto release;
1391 }
1392
1393 release:
1394 hw->phy.ops.release(hw);
1395 out:
1396 if (ret_val)
1397 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1398 else
1399 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1400
1401 return ret_val;
1402 }
1403
1404 /**
1405 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1406 * @hw: pointer to the HW structure
1407 * @force: boolean indicating whether or not to force disabling ULP
1408 *
1409 * Un-configure ULP mode when link is up, the system is transitioned from
1410  * Sx, or the driver is unloaded. If on a Manageability Engine (ME) enabled
1411 * system, poll for an indication from ME that ULP has been un-configured.
1412 * If not on an ME enabled system, un-configure the ULP mode by software.
1413 *
1414 * During nominal operation, this function is called when link is acquired
1415 * to disable ULP mode (force=false); otherwise, for example when unloading
1416 * the driver or during Sx->S0 transitions, this is called with force=true
1417 * to forcibly disable ULP.
1418  **/
1419 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1420 {
1421 s32 ret_val = E1000_SUCCESS;
1422 u8 ulp_exit_timeout = 30;
1423 u32 mac_reg;
1424 u16 phy_reg;
1425 int i = 0;
1426
1427 if ((hw->mac.type < e1000_pch_lpt) ||
1428 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1429 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1430 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1431 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1432 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1433 return 0;
1434
1435 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1436 if (force) {
1437 /* Request ME un-configure ULP mode in the PHY */
1438 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1439 mac_reg &= ~E1000_H2ME_ULP;
1440 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1441 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1442 }
1443
1444 if (hw->mac.type == e1000_pch_cnp)
1445 ulp_exit_timeout = 100;
1446
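		/* Poll ULP_CFG_DONE in 10 ms steps: up to ~300 ms by
		 * default, ~1 s on CNP.
		 */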
1447 while (E1000_READ_REG(hw, E1000_FWSM) &
1448 E1000_FWSM_ULP_CFG_DONE) {
1449 if (i++ == ulp_exit_timeout) {
1450 ret_val = -E1000_ERR_PHY;
1451 goto out;
1452 }
1453
1454 msec_delay(10);
1455 }
1456 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1457
1458 if (force) {
1459 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1460 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1461 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1462 } else {
1463 /* Clear H2ME.ULP after ME ULP configuration */
1464 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1465 mac_reg &= ~E1000_H2ME_ULP;
1466 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1467 }
1468
1469 goto out;
1470 }
1471
1472 ret_val = hw->phy.ops.acquire(hw);
1473 if (ret_val)
1474 goto out;
1475
1476 if (force)
1477 /* Toggle LANPHYPC Value bit */
1478 e1000_toggle_lanphypc_pch_lpt(hw);
1479
1480 /* Unforce SMBus mode in PHY */
1481 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1482 if (ret_val) {
1483 /* The MAC might be in PCIe mode, so temporarily force to
1484 * SMBus mode in order to access the PHY.
1485 */
1486 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1487 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1488 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1489
1490 msec_delay(50);
1491
1492 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1493 &phy_reg);
1494 if (ret_val)
1495 goto release;
1496 }
1497 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1498 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1499
1500 /* Unforce SMBus mode in MAC */
1501 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1502 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1503 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1504
1505 /* When ULP mode was previously entered, K1 was disabled by the
1506 * hardware. Re-Enable K1 in the PHY when exiting ULP.
1507 */
1508 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1509 if (ret_val)
1510 goto release;
1511 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1512 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1513
1514 /* Clear ULP enabled configuration */
1515 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1516 if (ret_val)
1517 goto release;
1518 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1519 I218_ULP_CONFIG1_STICKY_ULP |
1520 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1521 I218_ULP_CONFIG1_WOL_HOST |
1522 I218_ULP_CONFIG1_INBAND_EXIT |
1523 I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1524 I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1525 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1526 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1527
1528 /* Commit ULP changes by starting auto ULP configuration */
1529 phy_reg |= I218_ULP_CONFIG1_START;
1530 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1531
1532 /* Clear Disable SMBus Release on PERST# in MAC */
1533 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1534 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1535 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1536
1537 release:
1538 hw->phy.ops.release(hw);
1539 if (force) {
1540 hw->phy.ops.reset(hw);
1541 msec_delay(50);
1542 }
1543 out:
1544 if (ret_val)
1545 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1546 else
1547 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1548
1549 return ret_val;
1550 }
1551
1552 /**
1553 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1554 * @hw: pointer to the HW structure
1555 *
1556  * Checks to see if the link status of the hardware has changed. If a
1557 * change in link status has been detected, then we read the PHY registers
1558 * to get the current speed/duplex if link exists.
1559 **/
1560 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1561 {
1562 struct e1000_mac_info *mac = &hw->mac;
1563 s32 ret_val, tipg_reg = 0;
1564 u16 emi_addr, emi_val = 0;
1565 bool link;
1566 u16 phy_reg;
1567
1568 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1569
1570 /* We only want to go out to the PHY registers to see if Auto-Neg
1571 * has completed and/or if our link status has changed. The
1572 * get_link_status flag is set upon receiving a Link Status
1573 * Change or Rx Sequence Error interrupt.
1574 */
1575 if (!mac->get_link_status)
1576 return E1000_SUCCESS;
1577
1578 /* First we want to see if the MII Status Register reports
1579 * link. If so, then we want to get the current speed/duplex
1580 * of the PHY.
1581 */
1582 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1583 if (ret_val)
1584 return ret_val;
1585
1586 if (hw->mac.type == e1000_pchlan) {
1587 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1588 if (ret_val)
1589 return ret_val;
1590 }
1591
1592 /* When connected at 10Mbps half-duplex, some parts are excessively
1593 	 * aggressive, resulting in many collisions. To avoid this, increase
1594 * the IPG and reduce Rx latency in the PHY.
1595 */
1596 if ((hw->mac.type >= e1000_pch2lan) && link) {
1597 u16 speed, duplex;
1598
1599 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1600 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1601 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1602
1603 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1604 tipg_reg |= 0xFF;
1605 /* Reduce Rx latency in analog PHY */
1606 emi_val = 0;
1607 } else if (hw->mac.type >= e1000_pch_spt &&
1608 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1609 tipg_reg |= 0xC;
1610 emi_val = 1;
1611 } else {
1612 /* Roll back the default values */
1613 tipg_reg |= 0x08;
1614 emi_val = 1;
1615 }
1616
1617 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1618
1619 ret_val = hw->phy.ops.acquire(hw);
1620 if (ret_val)
1621 return ret_val;
1622
1623 if (hw->mac.type == e1000_pch2lan)
1624 emi_addr = I82579_RX_CONFIG;
1625 else
1626 emi_addr = I217_RX_CONFIG;
1627 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1628 
1630 if (hw->mac.type >= e1000_pch_lpt) {
1631 hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1632 &phy_reg);
1633 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1634 if (speed == SPEED_100 || speed == SPEED_10)
1635 phy_reg |= 0x3E8;
1636 else
1637 phy_reg |= 0xFA;
1638 hw->phy.ops.write_reg_locked(hw,
1639 I217_PLL_CLOCK_GATE_REG,
1640 phy_reg);
1641
1642 if (speed == SPEED_1000) {
1643 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1644 &phy_reg);
1645
1646 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1647
1648 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1649 phy_reg);
1650 }
1651 }
1652 hw->phy.ops.release(hw);
1653
1654 if (ret_val)
1655 return ret_val;
1656
1657 if (hw->mac.type >= e1000_pch_spt) {
1658 u16 data;
1659 u16 ptr_gap;
1660
1661 if (speed == SPEED_1000) {
1662 ret_val = hw->phy.ops.acquire(hw);
1663 if (ret_val)
1664 return ret_val;
1665
1666 ret_val = hw->phy.ops.read_reg_locked(hw,
1667 PHY_REG(776, 20),
1668 &data);
1669 if (ret_val) {
1670 hw->phy.ops.release(hw);
1671 return ret_val;
1672 }
1673
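/* Bits 11:2 of PHY_REG(776, 20) hold the Tx pointer gap
 * (cf. E1000_TX_PTR_GAP in the jumbo frame workaround below)
 */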
1674 ptr_gap = (data & (0x3FF << 2)) >> 2;
1675 if (ptr_gap < 0x18) {
1676 data &= ~(0x3FF << 2);
1677 data |= (0x18 << 2);
1678 ret_val =
1679 hw->phy.ops.write_reg_locked(hw,
1680 PHY_REG(776, 20), data);
1681 }
1682 hw->phy.ops.release(hw);
1683 if (ret_val)
1684 return ret_val;
1685 } else {
1686 ret_val = hw->phy.ops.acquire(hw);
1687 if (ret_val)
1688 return ret_val;
1689
1690 ret_val = hw->phy.ops.write_reg_locked(hw,
1691 PHY_REG(776, 20),
1692 0xC023);
1693 hw->phy.ops.release(hw);
1694 if (ret_val)
1695 return ret_val;
1696
1697 }
1698 }
1699 }
1700
1701 /* I217 Packet Loss issue:
1702 * ensure that FEXTNVM4 Beacon Duration is set correctly
1703 * on power up.
1704 * Set the Beacon Duration for I217 to 8 usec
1705 */
1706 if (hw->mac.type >= e1000_pch_lpt) {
1707 u32 mac_reg;
1708
1709 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1710 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1711 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1712 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1713 }
1714
1715 /* Work-around I218 hang issue */
1716 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1717 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1718 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1719 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1720 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1721 if (ret_val)
1722 return ret_val;
1723 }
1724 if (hw->mac.type >= e1000_pch_lpt) {
1725 /* Set platform power management values for
1726 * Latency Tolerance Reporting (LTR)
1727 * Optimized Buffer Flush/Fill (OBFF)
1728 */
1729 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1730 if (ret_val)
1731 return ret_val;
1732 }
1733 /* Clear link partner's EEE ability */
1734 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1735
1736 if (hw->mac.type >= e1000_pch_lpt) {
1737 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1738
1739 if (hw->mac.type == e1000_pch_spt) {
1740 /* FEXTNVM6 K1-off workaround - for SPT only */
1741 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1742
1743 if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1744 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1745 else
1746 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1747 }
1748
1749 if (hw->dev_spec.ich8lan.disable_k1_off)
1750 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1751
1752 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1753
1754 /* Configure K0s minimum time */
1755 e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1756 }
1757
1758 if (!link)
1759 return E1000_SUCCESS; /* No link detected */
1760
1761 mac->get_link_status = false;
1762
1763 switch (hw->mac.type) {
1764 case e1000_pch2lan:
1765 ret_val = e1000_k1_workaround_lv(hw);
1766 if (ret_val)
1767 return ret_val;
1768 /* FALLTHROUGH */
1769 case e1000_pchlan:
1770 if (hw->phy.type == e1000_phy_82578) {
1771 ret_val = e1000_link_stall_workaround_hv(hw);
1772 if (ret_val)
1773 return ret_val;
1774 }
1775
1776 /* Workaround for PCHx parts in half-duplex:
1777 * Set the number of preambles removed from the packet
1778 * when it is passed from the PHY to the MAC to prevent
1779 * the MAC from misinterpreting the packet type.
1780 */
1781 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1782 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1783
1784 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1785 E1000_STATUS_FD)
1786 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1787
1788 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1789 break;
1790 default:
1791 break;
1792 }
1793
1794 /* Check if there was a downshift; this must be checked
1795 * immediately after link-up
1796 */
1797 e1000_check_downshift_generic(hw);
1798
1799 /* Enable/Disable EEE after link up */
1800 if (hw->phy.type > e1000_phy_82579) {
1801 ret_val = e1000_set_eee_pchlan(hw);
1802 if (ret_val)
1803 return ret_val;
1804 }
1805
1806 /* If we are forcing speed/duplex, then we simply return since
1807 * we have already determined whether we have link or not.
1808 */
1809 if (!mac->autoneg)
1810 return -E1000_ERR_CONFIG;
1811
1812 /* Auto-Neg is enabled. Auto Speed Detection takes care
1813 * of MAC speed/duplex configuration. So we only need to
1814 * configure Collision Distance in the MAC.
1815 */
1816 mac->ops.config_collision_dist(hw);
1817
1818 /* Configure Flow Control now that Auto-Neg has completed.
1819 * First, we need to restore the desired flow control
1820 * settings because we may have had to re-autoneg with a
1821 * different link partner.
1822 */
1823 ret_val = e1000_config_fc_after_link_up_generic(hw);
1824 if (ret_val)
1825 DEBUGOUT("Error configuring flow control\n");
1826
1827 return ret_val;
1828 }
1829
1830 /**
1831 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1832 * @hw: pointer to the HW structure
1833 *
1834 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1835 **/
1836 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1837 {
1838 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1839
1840 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1841 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1842 switch (hw->mac.type) {
1843 case e1000_ich8lan:
1844 case e1000_ich9lan:
1845 case e1000_ich10lan:
1846 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1847 break;
1848 case e1000_pchlan:
1849 case e1000_pch2lan:
1850 case e1000_pch_lpt:
1851 case e1000_pch_spt:
1852 case e1000_pch_cnp:
1853 case e1000_pch_tgp:
1854 case e1000_pch_adp:
1855 case e1000_pch_mtp:
1856 case e1000_pch_ptp:
1857 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1858 break;
1859 default:
1860 break;
1861 }
1862 }
1863
1864 /**
1865 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1866 * @hw: pointer to the HW structure
1867 *
1868 * Acquires the mutex for performing NVM operations.
1869 **/
1870 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1871 {
1872 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1873
1874 ASSERT_CTX_LOCK_HELD(hw);
1875
1876 return E1000_SUCCESS;
1877 }
1878
1879 /**
1880 * e1000_release_nvm_ich8lan - Release NVM mutex
1881 * @hw: pointer to the HW structure
1882 *
1883 * Releases the mutex used while performing NVM operations.
1884 **/
1885 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1886 {
1887 DEBUGFUNC("e1000_release_nvm_ich8lan");
1888
1889 ASSERT_CTX_LOCK_HELD(hw);
1890 }
1891
1892 /**
1893 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1894 * @hw: pointer to the HW structure
1895 *
1896 * Acquires the software control flag for performing PHY and select
1897 * MAC CSR accesses.
1898 **/
1899 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1900 {
1901 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1902 s32 ret_val = E1000_SUCCESS;
1903
1904 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1905
1906 ASSERT_CTX_LOCK_HELD(hw);
1907
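/* Wait for any previous software owner to release the flag
 * before attempting to claim it
 */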
1908 while (timeout) {
1909 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1910 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1911 break;
1912
1913 msec_delay_irq(1);
1914 timeout--;
1915 }
1916
1917 if (!timeout) {
1918 DEBUGOUT("SW has already locked the resource.\n");
1919 ret_val = -E1000_ERR_CONFIG;
1920 goto out;
1921 }
1922
1923 timeout = SW_FLAG_TIMEOUT;
1924
1925 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1926 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1927
1928 while (timeout) {
1929 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1930 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1931 break;
1932
1933 msec_delay_irq(1);
1934 timeout--;
1935 }
1936
1937 if (!timeout) {
1938 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1939 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1940 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1941 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1942 ret_val = -E1000_ERR_CONFIG;
1943 goto out;
1944 }
1945
1946 out:
1947 return ret_val;
1948 }
1949
1950 /**
1951 * e1000_release_swflag_ich8lan - Release software control flag
1952 * @hw: pointer to the HW structure
1953 *
1954 * Releases the software control flag for performing PHY and select
1955 * MAC CSR accesses.
1956 **/
1957 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1958 {
1959 u32 extcnf_ctrl;
1960
1961 DEBUGFUNC("e1000_release_swflag_ich8lan");
1962
1963 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1964
1965 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1966 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1967 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1968 } else {
1969 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1970 }
1971 }
1972
1973 /**
1974 * e1000_check_mng_mode_ich8lan - Checks management mode
1975 * @hw: pointer to the HW structure
1976 *
1977 * This checks if the adapter has any manageability enabled.
1978 * This is a function pointer entry point only called by read/write
1979 * routines for the PHY and NVM parts.
1980 **/
1981 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1982 {
1983 u32 fwsm;
1984
1985 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1986
1987 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1988
1989 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1990 ((fwsm & E1000_FWSM_MODE_MASK) ==
1991 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1992 }
1993
1994 /**
1995 * e1000_check_mng_mode_pchlan - Checks management mode
1996 * @hw: pointer to the HW structure
1997 *
1998 * This checks if the adapter has iAMT enabled.
1999 * This is a function pointer entry point only called by read/write
2000 * routines for the PHY and NVM parts.
2001 **/
2002 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
2003 {
2004 u32 fwsm;
2005
2006 DEBUGFUNC("e1000_check_mng_mode_pchlan");
2007
2008 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2009
2010 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
2011 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
2012 }
2013
2014 /**
2015 * e1000_rar_set_pch2lan - Set receive address register
2016 * @hw: pointer to the HW structure
2017 * @addr: pointer to the receive address
2018 * @index: receive address array register
2019 *
2020 * Sets the receive address array register at index to the address passed
2021 * in by addr. For 82579, RAR[0] is the base address register that is to
2022 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
2023 * Use SHRA[0-3] in place of those reserved for ME.
2024 **/
2025 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2026 {
2027 u32 rar_low, rar_high;
2028
2029 DEBUGFUNC("e1000_rar_set_pch2lan");
2030
2031 /* HW expects these in little endian so we reverse the byte order
2032 * from network order (big endian) to little endian
2033 */
2034 rar_low = ((u32) addr[0] |
2035 ((u32) addr[1] << 8) |
2036 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2037
2038 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2039
2040 /* If MAC address zero, no need to set the AV bit */
2041 if (rar_low || rar_high)
2042 rar_high |= E1000_RAH_AV;
2043
2044 if (index == 0) {
2045 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2046 E1000_WRITE_FLUSH(hw);
2047 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2048 E1000_WRITE_FLUSH(hw);
2049 return E1000_SUCCESS;
2050 }
2051
2052 /* RAR[1-6] are owned by manageability. Skip those and program the
2053 * next address into the SHRA register array.
2054 */
2055 if (index < (u32) (hw->mac.rar_entry_count)) {
2056 s32 ret_val;
2057
2058 ret_val = e1000_acquire_swflag_ich8lan(hw);
2059 if (ret_val)
2060 goto out;
2061
2062 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2063 E1000_WRITE_FLUSH(hw);
2064 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2065 E1000_WRITE_FLUSH(hw);
2066
2067 e1000_release_swflag_ich8lan(hw);
2068
2069 /* verify the register updates */
2070 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2071 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2072 return E1000_SUCCESS;
2073
2074 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2075 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2076 }
2077
2078 out:
2079 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2080 return -E1000_ERR_CONFIG;
2081 }
2082
2083 /**
2084 * e1000_rar_set_pch_lpt - Set receive address registers
2085 * @hw: pointer to the HW structure
2086 * @addr: pointer to the receive address
2087 * @index: receive address array register
2088 *
2089 * Sets the receive address register array at index to the address passed
2090 * in by addr. For LPT, RAR[0] is the base address register that is to
2091 * contain the MAC address. SHRA[0-10] are the shared receive address
2092 * registers that are shared between the Host and manageability engine (ME).
2093 **/
2094 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2095 {
2096 u32 rar_low, rar_high;
2097 u32 wlock_mac;
2098
2099 DEBUGFUNC("e1000_rar_set_pch_lpt");
2100
2101 /* HW expects these in little endian so we reverse the byte order
2102 * from network order (big endian) to little endian
2103 */
2104 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2105 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2106
2107 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2108
2109 /* If MAC address zero, no need to set the AV bit */
2110 if (rar_low || rar_high)
2111 rar_high |= E1000_RAH_AV;
2112
2113 if (index == 0) {
2114 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2115 E1000_WRITE_FLUSH(hw);
2116 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2117 E1000_WRITE_FLUSH(hw);
2118 return E1000_SUCCESS;
2119 }
2120
2121 /* The manageability engine (ME) can lock certain SHRAR registers that
2122 * it is using - those registers are unavailable for use.
2123 */
2124 if (index < hw->mac.rar_entry_count) {
2125 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2126 E1000_FWSM_WLOCK_MAC_MASK;
2127 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2128
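/* The WLOCK_MAC field appears to encode: 0 = no SHRAR locked,
 * 1 = all locked, N > 1 = entries at index <= N still writable
 * by software (inferred from the checks below)
 */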
2129 /* Check if all SHRAR registers are locked */
2130 if (wlock_mac == 1)
2131 goto out;
2132
2133 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2134 s32 ret_val;
2135
2136 ret_val = e1000_acquire_swflag_ich8lan(hw);
2137
2138 if (ret_val)
2139 goto out;
2140
2141 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2142 rar_low);
2143 E1000_WRITE_FLUSH(hw);
2144 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2145 rar_high);
2146 E1000_WRITE_FLUSH(hw);
2147
2148 e1000_release_swflag_ich8lan(hw);
2149
2150 /* verify the register updates */
2151 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2152 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2153 return E1000_SUCCESS;
2154 }
2155 }
2156
2157 out:
2158 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2159 return -E1000_ERR_CONFIG;
2160 }
2161
2162 /**
2163 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2164 * @hw: pointer to the HW structure
2165 * @mc_addr_list: array of multicast addresses to program
2166 * @mc_addr_count: number of multicast addresses to program
2167 *
2168 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2169 * The caller must have a packed mc_addr_list of multicast addresses.
2170 **/
2171 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2172 u8 *mc_addr_list,
2173 u32 mc_addr_count)
2174 {
2175 u16 phy_reg = 0;
2176 int i;
2177 s32 ret_val;
2178
2179 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2180
2181 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2182
2183 ret_val = hw->phy.ops.acquire(hw);
2184 if (ret_val)
2185 return;
2186
2187 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2188 if (ret_val)
2189 goto release;
2190
2191 for (i = 0; i < hw->mac.mta_reg_count; i++) {
2192 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2193 (u16)(hw->mac.mta_shadow[i] &
2194 0xFFFF));
2195 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2196 (u16)((hw->mac.mta_shadow[i] >> 16) &
2197 0xFFFF));
2198 }
2199
2200 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2201
2202 release:
2203 hw->phy.ops.release(hw);
2204 }
2205
2206 /**
2207 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2208 * @hw: pointer to the HW structure
2209 *
2210 * Checks if firmware is blocking the reset of the PHY.
2211 * This is a function pointer entry point only called by
2212 * reset routines.
2213 **/
2214 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2215 {
2216 u32 fwsm;
2217 bool blocked = false;
2218 int i = 0;
2219
2220 DEBUGFUNC("e1000_check_reset_block_ich8lan");
2221
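/* Poll for up to ~300 ms (30 iterations x 10 ms) waiting for
 * firmware to deassert the PHY reset block
 */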
2222 do {
2223 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2224 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2225 blocked = true;
2226 msec_delay(10);
2227 continue;
2228 }
2229 blocked = false;
2230 } while (blocked && (i++ < 30));
2231 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2232 }
2233
2234 /**
2235 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2236 * @hw: pointer to the HW structure
2237 *
2238 * Assumes semaphore already acquired.
2239 *
2240 **/
2241 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2242 {
2243 u16 phy_data;
2244 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2245 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2246 E1000_STRAP_SMT_FREQ_SHIFT;
2247 s32 ret_val;
2248
2249 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2250
2251 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2252 if (ret_val)
2253 return ret_val;
2254
2255 phy_data &= ~HV_SMB_ADDR_MASK;
2256 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2257 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2258
2259 if (hw->phy.type == e1000_phy_i217) {
2260 /* Restore SMBus frequency */
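/* A strap frequency value of 0 is unsupported; values 1-3
 * store (freq - 1) across the PHY's low/high frequency bits
 */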
2261 if (freq--) {
2262 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2263 phy_data |= (freq & (1 << 0)) <<
2264 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2265 phy_data |= (freq & (1 << 1)) <<
2266 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2267 } else {
2268 DEBUGOUT("Unsupported SMB frequency in PHY\n");
2269 }
2270 }
2271
2272 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2273 }
2274
2275 /**
2276 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2277 * @hw: pointer to the HW structure
2278 *
2279 * SW should configure the LCD from the NVM extended configuration region
2280 * as a workaround for certain parts.
2281 **/
2282 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2283 {
2284 struct e1000_phy_info *phy = &hw->phy;
2285 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2286 s32 ret_val = E1000_SUCCESS;
2287 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2288
2289 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2290
2291 /* Initialize the PHY from the NVM on ICH platforms. This
2292 * is needed due to an issue where the NVM configuration is
2293 * not properly autoloaded after power transitions.
2294 * Therefore, after each PHY reset, we will load the
2295 * configuration data out of the NVM manually.
2296 */
2297 switch (hw->mac.type) {
2298 case e1000_ich8lan:
2299 if (phy->type != e1000_phy_igp_3)
2300 return ret_val;
2301
2302 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2303 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2304 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2305 break;
2306 }
2307 /* FALLTHROUGH */
2308 case e1000_pchlan:
2309 case e1000_pch2lan:
2310 case e1000_pch_lpt:
2311 case e1000_pch_spt:
2312 case e1000_pch_cnp:
2313 case e1000_pch_tgp:
2314 case e1000_pch_adp:
2315 case e1000_pch_mtp:
2316 case e1000_pch_ptp:
2317 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2318 break;
2319 default:
2320 return ret_val;
2321 }
2322
2323 ret_val = hw->phy.ops.acquire(hw);
2324 if (ret_val)
2325 return ret_val;
2326
2327 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2328 if (!(data & sw_cfg_mask))
2329 goto release;
2330
2331 /* Make sure HW does not configure LCD from PHY
2332 * extended configuration before SW configuration
2333 */
2334 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2335 if ((hw->mac.type < e1000_pch2lan) &&
2336 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2337 goto release;
2338
2339 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2340 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2341 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2342 if (!cnf_size)
2343 goto release;
2344
2345 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2346 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2347
2348 if (((hw->mac.type == e1000_pchlan) &&
2349 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2350 (hw->mac.type > e1000_pchlan)) {
2351 /* HW configures the SMBus address and LEDs when the
2352 * OEM and LCD Write Enable bits are set in the NVM.
2353 * When both NVM bits are cleared, SW will configure
2354 * them instead.
2355 */
2356 ret_val = e1000_write_smbus_addr(hw);
2357 if (ret_val)
2358 goto release;
2359
2360 data = E1000_READ_REG(hw, E1000_LEDCTL);
2361 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2362 (u16)data);
2363 if (ret_val)
2364 goto release;
2365 }
2366
2367 /* Configure LCD from extended configuration region. */
2368
2369 /* cnf_base_addr is in DWORD */
2370 word_addr = (u16)(cnf_base_addr << 1);
2371
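/* Each extended configuration entry is stored in NVM as a
 * (data, address) word pair
 */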
2372 for (i = 0; i < cnf_size; i++) {
2373 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2374 &reg_data);
2375 if (ret_val)
2376 goto release;
2377
2378 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2379 1, &reg_addr);
2380 if (ret_val)
2381 goto release;
2382
2383 /* Save off the PHY page for future writes. */
2384 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2385 phy_page = reg_data;
2386 continue;
2387 }
2388
2389 reg_addr &= PHY_REG_MASK;
2390 reg_addr |= phy_page;
2391
2392 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2393 reg_data);
2394 if (ret_val)
2395 goto release;
2396 }
2397
2398 release:
2399 hw->phy.ops.release(hw);
2400 return ret_val;
2401 }
2402
2403 /**
2404 * e1000_k1_gig_workaround_hv - K1 Si workaround
2405 * @hw: pointer to the HW structure
2406 * @link: link up bool flag
2407 *
2408 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2409 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
2410 * If link is down, the function will restore the default K1 setting located
2411 * in the NVM.
2412 **/
2413 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2414 {
2415 s32 ret_val = E1000_SUCCESS;
2416 u16 status_reg = 0;
2417 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2418
2419 DEBUGFUNC("e1000_k1_gig_workaround_hv");
2420
2421 if (hw->mac.type != e1000_pchlan)
2422 return E1000_SUCCESS;
2423
2424 /* Wrap the whole flow with the sw flag */
2425 ret_val = hw->phy.ops.acquire(hw);
2426 if (ret_val)
2427 return ret_val;
2428
2429 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2430 if (link) {
2431 if (hw->phy.type == e1000_phy_82578) {
2432 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2433 &status_reg);
2434 if (ret_val)
2435 goto release;
2436
2437 status_reg &= (BM_CS_STATUS_LINK_UP |
2438 BM_CS_STATUS_RESOLVED |
2439 BM_CS_STATUS_SPEED_MASK);
2440
2441 if (status_reg == (BM_CS_STATUS_LINK_UP |
2442 BM_CS_STATUS_RESOLVED |
2443 BM_CS_STATUS_SPEED_1000))
2444 k1_enable = false;
2445 }
2446
2447 if (hw->phy.type == e1000_phy_82577) {
2448 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2449 &status_reg);
2450 if (ret_val)
2451 goto release;
2452
2453 status_reg &= (HV_M_STATUS_LINK_UP |
2454 HV_M_STATUS_AUTONEG_COMPLETE |
2455 HV_M_STATUS_SPEED_MASK);
2456
2457 if (status_reg == (HV_M_STATUS_LINK_UP |
2458 HV_M_STATUS_AUTONEG_COMPLETE |
2459 HV_M_STATUS_SPEED_1000))
2460 k1_enable = false;
2461 }
2462
2463 /* Link stall fix for link up */
2464 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2465 0x0100);
2466 if (ret_val)
2467 goto release;
2468
2469 } else {
2470 /* Link stall fix for link down */
2471 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2472 0x4100);
2473 if (ret_val)
2474 goto release;
2475 }
2476
2477 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2478
2479 release:
2480 hw->phy.ops.release(hw);
2481
2482 return ret_val;
2483 }
2484
2485 /**
2486 * e1000_configure_k1_ich8lan - Configure K1 power state
2487 * @hw: pointer to the HW structure
2488 * @k1_enable: K1 state to configure
2489 *
2490 * Configure the K1 power state based on the provided parameter.
2491 * Assumes semaphore already acquired.
2492 *
2493 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2494 **/
2495 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2496 {
2497 s32 ret_val;
2498 u32 ctrl_reg = 0;
2499 u32 ctrl_ext = 0;
2500 u32 reg = 0;
2501 u16 kmrn_reg = 0;
2502
2503 DEBUGFUNC("e1000_configure_k1_ich8lan");
2504
2505 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2506 &kmrn_reg);
2507 if (ret_val)
2508 return ret_val;
2509
2510 if (k1_enable)
2511 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2512 else
2513 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2514
2515 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2516 kmrn_reg);
2517 if (ret_val)
2518 return ret_val;
2519
2520 usec_delay(20);
2521 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2522 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2523
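/* Briefly force the MAC speed with the speed-bypass bit set,
 * presumably so the K1 change is latched by hardware, then
 * restore the original CTRL and CTRL_EXT values
 */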
2524 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2525 reg |= E1000_CTRL_FRCSPD;
2526 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2527
2528 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2529 E1000_WRITE_FLUSH(hw);
2530 usec_delay(20);
2531 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2532 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2533 E1000_WRITE_FLUSH(hw);
2534 usec_delay(20);
2535
2536 return E1000_SUCCESS;
2537 }
2538
2539 /**
2540 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2541 * @hw: pointer to the HW structure
2542 * @d0_state: boolean if entering d0 or d3 device state
2543 *
2544 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2545 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2546 * in NVM determine whether HW should configure LPLU and Gbe Disable.
2547 **/
2548 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2549 {
2550 s32 ret_val = 0;
2551 u32 mac_reg;
2552 u16 oem_reg;
2553
2554 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2555
2556 if (hw->mac.type < e1000_pchlan)
2557 return ret_val;
2558
2559 ret_val = hw->phy.ops.acquire(hw);
2560 if (ret_val)
2561 return ret_val;
2562
2563 if (hw->mac.type == e1000_pchlan) {
2564 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2565 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2566 goto release;
2567 }
2568
2569 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2570 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2571 goto release;
2572
2573 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2574
2575 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2576 if (ret_val)
2577 goto release;
2578
2579 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2580
2581 if (d0_state) {
2582 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2583 oem_reg |= HV_OEM_BITS_GBE_DIS;
2584
2585 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2586 oem_reg |= HV_OEM_BITS_LPLU;
2587 } else {
2588 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2589 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2590 oem_reg |= HV_OEM_BITS_GBE_DIS;
2591
2592 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2593 E1000_PHY_CTRL_NOND0A_LPLU))
2594 oem_reg |= HV_OEM_BITS_LPLU;
2595 }
2596
2597 /* Set Restart auto-neg to activate the bits */
2598 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2599 !hw->phy.ops.check_reset_block(hw))
2600 oem_reg |= HV_OEM_BITS_RESTART_AN;
2601
2602 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2603
2604 release:
2605 hw->phy.ops.release(hw);
2606
2607 return ret_val;
2608 }
2609
2610
2611 /**
2612 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2613 * @hw: pointer to the HW structure
2614 **/
2615 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2616 {
2617 s32 ret_val;
2618 u16 data;
2619
2620 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2621
2622 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2623 if (ret_val)
2624 return ret_val;
2625
2626 data |= HV_KMRN_MDIO_SLOW;
2627
2628 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2629
2630 return ret_val;
2631 }
2632
2633 /**
2634 * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2635 * done after every PHY reset.
2636 * @hw: pointer to the HW structure
2637 **/
2638 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2639 {
2640 s32 ret_val = E1000_SUCCESS;
2641 u16 phy_data;
2642
2643 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2644
2645 if (hw->mac.type != e1000_pchlan)
2646 return E1000_SUCCESS;
2647
2648 /* Set MDIO slow mode before any other MDIO access */
2649 if (hw->phy.type == e1000_phy_82577) {
2650 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2651 if (ret_val)
2652 return ret_val;
2653 }
2654
2655 if (((hw->phy.type == e1000_phy_82577) &&
2656 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2657 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2658 /* Disable generation of early preamble */
2659 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2660 if (ret_val)
2661 return ret_val;
2662
2663 /* Preamble tuning for SSC */
2664 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2665 0xA204);
2666 if (ret_val)
2667 return ret_val;
2668 }
2669
2670 if (hw->phy.type == e1000_phy_82578) {
2671 /* Return registers to default by doing a soft reset then
2672 * writing 0x3140 to the control register.
2673 */
2674 if (hw->phy.revision < 2) {
2675 e1000_phy_sw_reset_generic(hw);
2676 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2677 0x3140);
2678 if (ret_val)
2679 return ret_val;
2680 }
2681 }
2682
2683 /* Select page 0 */
2684 ret_val = hw->phy.ops.acquire(hw);
2685 if (ret_val)
2686 return ret_val;
2687
2688 hw->phy.addr = 1;
2689 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2690 hw->phy.ops.release(hw);
2691 if (ret_val)
2692 return ret_val;
2693
2694 /* Configure the K1 Si workaround during phy reset assuming there is
2695 * link so that it disables K1 if link is in 1Gbps.
2696 */
2697 ret_val = e1000_k1_gig_workaround_hv(hw, true);
2698 if (ret_val)
2699 return ret_val;
2700
2701 /* Workaround for link disconnects on a busy hub in half duplex */
2702 ret_val = hw->phy.ops.acquire(hw);
2703 if (ret_val)
2704 return ret_val;
2705 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2706 if (ret_val)
2707 goto release;
2708 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2709 phy_data & 0x00FF);
2710 if (ret_val)
2711 goto release;
2712
2713 /* set MSE higher to enable link to stay up when noise is high */
2714 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2715 release:
2716 hw->phy.ops.release(hw);
2717
2718 return ret_val;
2719 }
2720
2721 /**
2722 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2723 * @hw: pointer to the HW structure
2724 **/
2725 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2726 {
2727 u32 mac_reg;
2728 u16 i, phy_reg = 0;
2729 s32 ret_val;
2730
2731 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2732
2733 ret_val = hw->phy.ops.acquire(hw);
2734 if (ret_val)
2735 return;
2736 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2737 if (ret_val)
2738 goto release;
2739
2740 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2741 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2742 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2743 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2744 (u16)(mac_reg & 0xFFFF));
2745 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2746 (u16)((mac_reg >> 16) & 0xFFFF));
2747
2748 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2749 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2750 (u16)(mac_reg & 0xFFFF));
2751 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2752 (u16)((mac_reg & E1000_RAH_AV)
2753 >> 16));
2754 }
2755
2756 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2757
2758 release:
2759 hw->phy.ops.release(hw);
2760 }
2761
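/**
 * e1000_calc_rx_da_crc - Calculate CRC-32 of a destination MAC address
 * @mac: pointer to the 6-byte MAC address
 *
 * Computes the IEEE 802.3 CRC-32 (reflected, polynomial 0xEDB88320) used as
 * the initial Rx address CRC (PCH_RAICC) in the jumbo frame workaround below.
 **/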
2762 static u32 e1000_calc_rx_da_crc(u8 mac[])
2763 {
2764 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2765 u32 i, j, mask, crc;
2766
2767 DEBUGFUNC("e1000_calc_rx_da_crc");
2768
2769 crc = 0xffffffff;
2770 for (i = 0; i < 6; i++) {
2771 crc = crc ^ mac[i];
2772 for (j = 8; j > 0; j--) {
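/* mask is all ones when the CRC LSB is set, all zeros otherwise */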
2773 mask = (crc & 1) * (-1);
2774 crc = (crc >> 1) ^ (poly & mask);
2775 }
2776 }
2777 return ~crc;
2778 }
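
/* Hypothetical usage sketch (mirrors the jumbo frame workaround below);
 * the MAC address value here is illustrative only:
 *
 *	u8 mac_addr[6] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };
 *	E1000_WRITE_REG(hw, E1000_PCH_RAICC(0),
 *			e1000_calc_rx_da_crc(mac_addr));
 */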
2779
2780 /**
2781 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2782 * with 82579 PHY
2783 * @hw: pointer to the HW structure
2784 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2785 **/
2786 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2787 {
2788 s32 ret_val = E1000_SUCCESS;
2789 u16 phy_reg, data;
2790 u32 mac_reg;
2791 u16 i;
2792
2793 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2794
2795 if (hw->mac.type < e1000_pch2lan)
2796 return E1000_SUCCESS;
2797
2798 /* disable Rx path while enabling/disabling workaround */
2799 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2800 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2801 phy_reg | (1 << 14));
2802 if (ret_val)
2803 return ret_val;
2804
2805 if (enable) {
2806 /* Write Rx addresses (rar_entry_count for RAL/H, and
2807 * SHRAL/H) and initial CRC values to the MAC
2808 */
2809 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2810 u8 mac_addr[ETHER_ADDR_LEN] = {0};
2811 u32 addr_high, addr_low;
2812
2813 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2814 if (!(addr_high & E1000_RAH_AV))
2815 continue;
2816 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2817 mac_addr[0] = (addr_low & 0xFF);
2818 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2819 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2820 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2821 mac_addr[4] = (addr_high & 0xFF);
2822 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2823
2824 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2825 e1000_calc_rx_da_crc(mac_addr));
2826 }
2827
2828 /* Write Rx addresses to the PHY */
2829 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2830
2831 /* Enable jumbo frame workaround in the MAC */
2832 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2833 mac_reg &= ~(1 << 14);
2834 mac_reg |= (7 << 15);
2835 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2836
2837 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2838 mac_reg |= E1000_RCTL_SECRC;
2839 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2840
2841 ret_val = e1000_read_kmrn_reg_generic(hw,
2842 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2843 &data);
2844 if (ret_val)
2845 return ret_val;
2846 ret_val = e1000_write_kmrn_reg_generic(hw,
2847 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2848 data | (1 << 0));
2849 if (ret_val)
2850 return ret_val;
2851 ret_val = e1000_read_kmrn_reg_generic(hw,
2852 E1000_KMRNCTRLSTA_HD_CTRL,
2853 &data);
2854 if (ret_val)
2855 return ret_val;
2856 data &= ~(0xF << 8);
2857 data |= (0xB << 8);
2858 ret_val = e1000_write_kmrn_reg_generic(hw,
2859 E1000_KMRNCTRLSTA_HD_CTRL,
2860 data);
2861 if (ret_val)
2862 return ret_val;
2863
2864 /* Enable jumbo frame workaround in the PHY */
2865 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2866 data &= ~(0x7F << 5);
2867 data |= (0x37 << 5);
2868 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2869 if (ret_val)
2870 return ret_val;
2871 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2872 data &= ~(1 << 13);
2873 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2874 if (ret_val)
2875 return ret_val;
2876 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2877 data &= ~(0x3FF << 2);
2878 data |= (E1000_TX_PTR_GAP << 2);
2879 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2880 if (ret_val)
2881 return ret_val;
2882 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2883 if (ret_val)
2884 return ret_val;
2885 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2886 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2887 (1 << 10));
2888 if (ret_val)
2889 return ret_val;
2890 } else {
2891 /* Write MAC register values back to h/w defaults */
2892 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2893 mac_reg &= ~(0xF << 14);
2894 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2895
2896 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2897 mac_reg &= ~E1000_RCTL_SECRC;
2898 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2899
2900 ret_val = e1000_read_kmrn_reg_generic(hw,
2901 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2902 &data);
2903 if (ret_val)
2904 return ret_val;
2905 ret_val = e1000_write_kmrn_reg_generic(hw,
2906 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2907 data & ~(1 << 0));
2908 if (ret_val)
2909 return ret_val;
2910 ret_val = e1000_read_kmrn_reg_generic(hw,
2911 E1000_KMRNCTRLSTA_HD_CTRL,
2912 &data);
2913 if (ret_val)
2914 return ret_val;
2915 data &= ~(0xF << 8);
2916 data |= (0xB << 8);
2917 ret_val = e1000_write_kmrn_reg_generic(hw,
2918 E1000_KMRNCTRLSTA_HD_CTRL,
2919 data);
2920 if (ret_val)
2921 return ret_val;
2922
2923 /* Write PHY register values back to h/w defaults */
2924 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2925 data &= ~(0x7F << 5);
2926 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2927 if (ret_val)
2928 return ret_val;
2929 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2930 data |= (1 << 13);
2931 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2932 if (ret_val)
2933 return ret_val;
2934 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2935 data &= ~(0x3FF << 2);
2936 data |= (0x8 << 2);
2937 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2938 if (ret_val)
2939 return ret_val;
2940 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2941 if (ret_val)
2942 return ret_val;
2943 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2944 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2945 ~(1 << 10));
2946 if (ret_val)
2947 return ret_val;
2948 }
2949
2950 /* re-enable Rx path after enabling/disabling workaround */
2951 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2952 ~(1 << 14));
2953 }
2954
2955 /**
2956 * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2957 * done after every PHY reset.
2958 * @hw: pointer to the HW structure
2959 **/
2960 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2961 {
2962 s32 ret_val = E1000_SUCCESS;
2963
2964 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2965
2966 if (hw->mac.type != e1000_pch2lan)
2967 return E1000_SUCCESS;
2968
2969 /* Set MDIO slow mode before any other MDIO access */
2970 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2971 if (ret_val)
2972 return ret_val;
2973
2974 ret_val = hw->phy.ops.acquire(hw);
2975 if (ret_val)
2976 return ret_val;
2977 /* set MSE higher to enable link to stay up when noise is high */
2978 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2979 if (ret_val)
2980 goto release;
2981 /* drop link after 5 times MSE threshold was reached */
2982 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2983 release:
2984 hw->phy.ops.release(hw);
2985
2986 return ret_val;
2987 }
2988
2989 /**
2990 * e1000_k1_workaround_lv - K1 Si workaround
2991 * @hw: pointer to the HW structure
2992 *
2993 * Workaround to set the K1 beacon duration for 82579 parts linked at 10Mbps.
2994 * Disables K1 for 1000 and 100 speeds.
2995 **/
2996 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2997 {
2998 s32 ret_val = E1000_SUCCESS;
2999 u16 status_reg = 0;
3000
3001 DEBUGFUNC("e1000_k1_workaround_lv");
3002
3003 if (hw->mac.type != e1000_pch2lan)
3004 return E1000_SUCCESS;
3005
3006 /* Set K1 beacon duration based on 10Mbps speed */
3007 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
3008 if (ret_val)
3009 return ret_val;
3010
3011 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
3012 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
3013 if (status_reg &
3014 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
3015 u16 pm_phy_reg;
3016
3017 /* LV 1G/100 Packet drop issue wa */
3018 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
3019 &pm_phy_reg);
3020 if (ret_val)
3021 return ret_val;
3022 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
3023 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
3024 pm_phy_reg);
3025 if (ret_val)
3026 return ret_val;
3027 } else {
3028 u32 mac_reg;
3029 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3030 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3031 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3032 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3033 }
3034 }
3035
3036 return ret_val;
3037 }
3038
3039 /**
3040 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3041 * @hw: pointer to the HW structure
3042 * @gate: boolean set to true to gate, false to ungate
3043 *
3044 * Gate/ungate the automatic PHY configuration via hardware; perform
3045 * the configuration via software instead.
3046 **/
3047 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3048 {
3049 u32 extcnf_ctrl;
3050
3051 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3052
3053 if (hw->mac.type < e1000_pch2lan)
3054 return;
3055
3056 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3057
3058 if (gate)
3059 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3060 else
3061 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3062
3063 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3064 }
3065
3066 /**
3067 * e1000_lan_init_done_ich8lan - Check for PHY config completion
3068 * @hw: pointer to the HW structure
3069 *
3070 * Check the appropriate indication that the MAC has finished configuring the
3071 * PHY after a software reset.
3072 **/
3073 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3074 {
3075 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3076
3077 DEBUGFUNC("e1000_lan_init_done_ich8lan");
3078
3079 /* Wait for basic configuration to complete before proceeding */
3080 do {
3081 data = E1000_READ_REG(hw, E1000_STATUS);
3082 data &= E1000_STATUS_LAN_INIT_DONE;
3083 usec_delay(100);
3084 } while ((!data) && --loop);
3085
3086 /* If basic configuration is incomplete before the above loop
3087 * count reaches 0, loading the configuration from NVM will
3088 * leave the PHY in a bad state possibly resulting in no link.
3089 */
3090 if (loop == 0)
3091 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3092
3093 /* Clear the Init Done bit for the next init event */
3094 data = E1000_READ_REG(hw, E1000_STATUS);
3095 data &= ~E1000_STATUS_LAN_INIT_DONE;
3096 E1000_WRITE_REG(hw, E1000_STATUS, data);
3097 }
3098
3099 /**
3100 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3101 * @hw: pointer to the HW structure
3102 **/
3103 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3104 {
3105 s32 ret_val = E1000_SUCCESS;
3106 u16 reg;
3107
3108 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3109
3110 if (hw->phy.ops.check_reset_block(hw))
3111 return E1000_SUCCESS;
3112
3113 /* Allow time for h/w to get to quiescent state after reset */
3114 msec_delay(10);
3115
3116 /* Perform any necessary post-reset workarounds */
3117 switch (hw->mac.type) {
3118 case e1000_pchlan:
3119 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3120 if (ret_val)
3121 return ret_val;
3122 break;
3123 case e1000_pch2lan:
3124 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3125 if (ret_val)
3126 return ret_val;
3127 break;
3128 default:
3129 break;
3130 }
3131
3132 /* Clear the host wakeup bit after lcd reset */
3133 if (hw->mac.type >= e1000_pchlan) {
3134 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3135 reg &= ~BM_WUC_HOST_WU_BIT;
3136 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3137 }
3138
3139 /* Configure the LCD with the extended configuration region in NVM */
3140 ret_val = e1000_sw_lcd_config_ich8lan(hw);
3141 if (ret_val)
3142 return ret_val;
3143
3144 /* Configure the LCD with the OEM bits in NVM */
3145 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
3146
3147 if (hw->mac.type == e1000_pch2lan) {
3148 /* Ungate automatic PHY configuration on non-managed 82579 */
3149 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3150 E1000_ICH_FWSM_FW_VALID)) {
3151 msec_delay(10);
3152 e1000_gate_hw_phy_config_ich8lan(hw, false);
3153 }
3154
3155 /* Set EEE LPI Update Timer to 200usec */
3156 ret_val = hw->phy.ops.acquire(hw);
3157 if (ret_val)
3158 return ret_val;
3159 ret_val = e1000_write_emi_reg_locked(hw,
3160 I82579_LPI_UPDATE_TIMER,
3161 0x1387);
3162 hw->phy.ops.release(hw);
3163 }
3164
3165 return ret_val;
3166 }
3167
3168 /**
3169 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3170 * @hw: pointer to the HW structure
3171 *
3172 * Resets the PHY
3173 * This is a function pointer entry point called by drivers
3174 * or other shared routines.
3175 **/
3176 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3177 {
3178 s32 ret_val = E1000_SUCCESS;
3179
3180 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3181
3182 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3183 if ((hw->mac.type == e1000_pch2lan) &&
3184 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3185 e1000_gate_hw_phy_config_ich8lan(hw, true);
3186
3187 ret_val = e1000_phy_hw_reset_generic(hw);
3188 if (ret_val)
3189 return ret_val;
3190
3191 return e1000_post_phy_reset_ich8lan(hw);
3192 }
3193
3194 /**
3195 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3196 * @hw: pointer to the HW structure
3197 * @active: true to enable LPLU, false to disable
3198 *
3199 * Sets the LPLU state according to the active flag. For PCH, if the OEM write
3200 * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
3201 * the phy speed. This function will manually set the LPLU bit and restart
3202 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
3203 * since it configures the same bit.
3204 **/
3205 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3206 {
3207 s32 ret_val;
3208 u16 oem_reg;
3209
3210 DEBUGFUNC("e1000_set_lplu_state_pchlan");
3211 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3212 if (ret_val)
3213 return ret_val;
3214
3215 if (active)
3216 oem_reg |= HV_OEM_BITS_LPLU;
3217 else
3218 oem_reg &= ~HV_OEM_BITS_LPLU;
3219
3220 if (!hw->phy.ops.check_reset_block(hw))
3221 oem_reg |= HV_OEM_BITS_RESTART_AN;
3222
3223 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3224 }
3225
3226 /**
3227 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3228 * @hw: pointer to the HW structure
3229 * @active: true to enable LPLU, false to disable
3230 *
3231 * Sets the LPLU D0 state according to the active flag. When
3232 * activating LPLU this function also disables smart speed
3233 * and vice versa. LPLU will not be activated unless the
3234 * device autonegotiation advertisement meets standards of
3235 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3236 * This is a function pointer entry point only called by
3237 * PHY setup routines.
3238 **/
3239 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3240 {
3241 struct e1000_phy_info *phy = &hw->phy;
3242 u32 phy_ctrl;
3243 s32 ret_val = E1000_SUCCESS;
3244 u16 data;
3245
3246 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3247
3248 if (phy->type == e1000_phy_ife)
3249 return E1000_SUCCESS;
3250
3251 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3252
3253 if (active) {
3254 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3255 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3256
3257 if (phy->type != e1000_phy_igp_3)
3258 return E1000_SUCCESS;
3259
3260 /* Call gig speed drop workaround on LPLU before accessing
3261 * any PHY registers
3262 */
3263 if (hw->mac.type == e1000_ich8lan)
3264 e1000_gig_downshift_workaround_ich8lan(hw);
3265
3266 /* When LPLU is enabled, we should disable SmartSpeed */
3267 ret_val = phy->ops.read_reg(hw,
3268 IGP01E1000_PHY_PORT_CONFIG,
3269 &data);
3270 if (ret_val)
3271 return ret_val;
3272 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3273 ret_val = phy->ops.write_reg(hw,
3274 IGP01E1000_PHY_PORT_CONFIG,
3275 data);
3276 if (ret_val)
3277 return ret_val;
3278 } else {
3279 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3280 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3281
3282 if (phy->type != e1000_phy_igp_3)
3283 return E1000_SUCCESS;
3284
3285 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3286 * during Dx states where the power conservation is most
3287 * important. During driver activity we should enable
3288 * SmartSpeed, so performance is maintained.
3289 */
3290 if (phy->smart_speed == e1000_smart_speed_on) {
3291 ret_val = phy->ops.read_reg(hw,
3292 IGP01E1000_PHY_PORT_CONFIG,
3293 &data);
3294 if (ret_val)
3295 return ret_val;
3296
3297 data |= IGP01E1000_PSCFR_SMART_SPEED;
3298 ret_val = phy->ops.write_reg(hw,
3299 IGP01E1000_PHY_PORT_CONFIG,
3300 data);
3301 if (ret_val)
3302 return ret_val;
3303 } else if (phy->smart_speed == e1000_smart_speed_off) {
3304 ret_val = phy->ops.read_reg(hw,
3305 IGP01E1000_PHY_PORT_CONFIG,
3306 &data);
3307 if (ret_val)
3308 return ret_val;
3309
3310 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3311 ret_val = phy->ops.write_reg(hw,
3312 IGP01E1000_PHY_PORT_CONFIG,
3313 data);
3314 if (ret_val)
3315 return ret_val;
3316 }
3317 }
3318
3319 return E1000_SUCCESS;
3320 }
3321
3322 /**
3323 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3324 * @hw: pointer to the HW structure
3325 * @active: true to enable LPLU, false to disable
3326 *
3327 * Sets the LPLU D3 state according to the active flag. When
3328 * activating LPLU this function also disables smart speed
3329 * and vice versa. LPLU will not be activated unless the
3330 * device autonegotiation advertisement meets standards of
3331 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3332 * This is a function pointer entry point only called by
3333 * PHY setup routines.
3334 **/
3335 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3336 {
3337 struct e1000_phy_info *phy = &hw->phy;
3338 u32 phy_ctrl;
3339 s32 ret_val = E1000_SUCCESS;
3340 u16 data;
3341
3342 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3343
3344 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3345
3346 if (!active) {
3347 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3348 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3349
3350 if (phy->type != e1000_phy_igp_3)
3351 return E1000_SUCCESS;
3352
3353 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3354 * during Dx states where the power conservation is most
3355 * important. During driver activity we should enable
3356 * SmartSpeed, so performance is maintained.
3357 */
3358 if (phy->smart_speed == e1000_smart_speed_on) {
3359 ret_val = phy->ops.read_reg(hw,
3360 IGP01E1000_PHY_PORT_CONFIG,
3361 &data);
3362 if (ret_val)
3363 return ret_val;
3364
3365 data |= IGP01E1000_PSCFR_SMART_SPEED;
3366 ret_val = phy->ops.write_reg(hw,
3367 IGP01E1000_PHY_PORT_CONFIG,
3368 data);
3369 if (ret_val)
3370 return ret_val;
3371 } else if (phy->smart_speed == e1000_smart_speed_off) {
3372 ret_val = phy->ops.read_reg(hw,
3373 IGP01E1000_PHY_PORT_CONFIG,
3374 &data);
3375 if (ret_val)
3376 return ret_val;
3377
3378 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3379 ret_val = phy->ops.write_reg(hw,
3380 IGP01E1000_PHY_PORT_CONFIG,
3381 data);
3382 if (ret_val)
3383 return ret_val;
3384 }
3385 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3386 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3387 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3388 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3389 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3390
3391 if (phy->type != e1000_phy_igp_3)
3392 return E1000_SUCCESS;
3393
3394 /* Call gig speed drop workaround on LPLU before accessing
3395 * any PHY registers
3396 */
3397 if (hw->mac.type == e1000_ich8lan)
3398 e1000_gig_downshift_workaround_ich8lan(hw);
3399
3400 /* When LPLU is enabled, we should disable SmartSpeed */
3401 ret_val = phy->ops.read_reg(hw,
3402 IGP01E1000_PHY_PORT_CONFIG,
3403 &data);
3404 if (ret_val)
3405 return ret_val;
3406
3407 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3408 ret_val = phy->ops.write_reg(hw,
3409 IGP01E1000_PHY_PORT_CONFIG,
3410 data);
3411 }
3412
3413 return ret_val;
3414 }
3415
3416 /**
3417 * e1000_valid_nvm_bank_detect_ich8lan - determine the valid NVM bank, 0 or 1
3418 * @hw: pointer to the HW structure
3419 * @bank: pointer to the variable that returns the active bank
3420 *
3421 * Reads signature byte from the NVM using the flash access registers.
3422 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3423 **/
3424 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3425 {
3426 u32 eecd;
3427 struct e1000_nvm_info *nvm = &hw->nvm;
3428 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3429 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
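/* flash_bank_size is in words; act_offset addresses the high
 * byte of the signature word
 */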
3430 u32 nvm_dword = 0;
3431 u8 sig_byte = 0;
3432 s32 ret_val;
3433
3434 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3435
3436 switch (hw->mac.type) {
3437 case e1000_pch_spt:
3438 case e1000_pch_cnp:
3439 case e1000_pch_tgp:
3440 case e1000_pch_adp:
3441 case e1000_pch_mtp:
3442 case e1000_pch_ptp:
3443 bank1_offset = nvm->flash_bank_size;
3444 act_offset = E1000_ICH_NVM_SIG_WORD;
3445
3446 /* set bank to 0 in case flash read fails */
3447 *bank = 0;
3448
3449 /* Check bank 0 */
3450 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3451 &nvm_dword);
3452 if (ret_val)
3453 return ret_val;
3454 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3455 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3456 E1000_ICH_NVM_SIG_VALUE) {
3457 *bank = 0;
3458 return E1000_SUCCESS;
3459 }
3460
3461 /* Check bank 1 */
3462 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3463 bank1_offset,
3464 &nvm_dword);
3465 if (ret_val)
3466 return ret_val;
3467 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3468 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3469 E1000_ICH_NVM_SIG_VALUE) {
3470 *bank = 1;
3471 return E1000_SUCCESS;
3472 }
3473
3474 DEBUGOUT("ERROR: No valid NVM bank present\n");
3475 return -E1000_ERR_NVM;
3476 case e1000_ich8lan:
3477 case e1000_ich9lan:
3478 eecd = E1000_READ_REG(hw, E1000_EECD);
3479 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3480 E1000_EECD_SEC1VAL_VALID_MASK) {
3481 if (eecd & E1000_EECD_SEC1VAL)
3482 *bank = 1;
3483 else
3484 *bank = 0;
3485
3486 return E1000_SUCCESS;
3487 }
3488 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3489 /* FALLTHROUGH */
3490 default:
3491 /* set bank to 0 in case flash read fails */
3492 *bank = 0;
3493
3494 /* Check bank 0 */
3495 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3496 &sig_byte);
3497 if (ret_val)
3498 return ret_val;
3499 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3500 E1000_ICH_NVM_SIG_VALUE) {
3501 *bank = 0;
3502 return E1000_SUCCESS;
3503 }
3504
3505 /* Check bank 1 */
3506 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3507 bank1_offset,
3508 &sig_byte);
3509 if (ret_val)
3510 return ret_val;
3511 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3512 E1000_ICH_NVM_SIG_VALUE) {
3513 *bank = 1;
3514 return E1000_SUCCESS;
3515 }
3516
3517 DEBUGOUT("ERROR: No valid NVM bank present\n");
3518 return -E1000_ERR_NVM;
3519 }
3520 }
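
/* A minimal sketch (hypothetical helper, not part of the driver) of the
 * signature test used above: a bank is valid when bits 7:6 of the byte
 * read from the high byte of word 0x13 are 10b, i.e. the masked value
 * equals E1000_ICH_NVM_SIG_VALUE.
 */
static bool e1000_sig_byte_is_valid(u8 sig_byte)
{
	return (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
	       E1000_ICH_NVM_SIG_VALUE;
}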
3521
3522 /**
3523 * e1000_read_nvm_spt - NVM access for SPT
3524 * @hw: pointer to the HW structure
3525 * @offset: The offset (in words) of the word(s) to read.
3526 * @words: Size of data to read in words.
3527 * @data: pointer to the word(s) to read at offset.
3528 *
3529 * Reads a word(s) from the NVM
3530 **/
3531 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3532 u16 *data)
3533 {
3534 struct e1000_nvm_info *nvm = &hw->nvm;
3535 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3536 u32 act_offset;
3537 s32 ret_val = E1000_SUCCESS;
3538 u32 bank = 0;
3539 u32 dword = 0;
3540 u16 offset_to_read;
3541 u16 i;
3542
3543 DEBUGFUNC("e1000_read_nvm_spt");
3544
3545 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3546 (words == 0)) {
3547 DEBUGOUT("nvm parameter(s) out of bounds\n");
3548 ret_val = -E1000_ERR_NVM;
3549 goto out;
3550 }
3551
3552 nvm->ops.acquire(hw);
3553
3554 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3555 if (ret_val != E1000_SUCCESS) {
3556 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3557 bank = 0;
3558 }
3559
3560 act_offset = (bank) ? nvm->flash_bank_size : 0;
3561 act_offset += offset;
3562
3563 ret_val = E1000_SUCCESS;
3564
3565 for (i = 0; i < words; i += 2) {
3566 if (words - i == 1) {
3567 if (dev_spec->shadow_ram[offset + i].modified) {
3568 data[i] =
3569 dev_spec->shadow_ram[offset + i].value;
3570 } else {
3571 offset_to_read = act_offset + i -
3572 ((act_offset + i) % 2);
3573 ret_val =
3574 e1000_read_flash_dword_ich8lan(hw,
3575 offset_to_read,
3576 &dword);
3577 if (ret_val)
3578 break;
3579 if ((act_offset + i) % 2 == 0)
3580 data[i] = (u16)(dword & 0xFFFF);
3581 else
3582 data[i] = (u16)((dword >> 16) & 0xFFFF);
3583 }
3584 } else {
3585 offset_to_read = act_offset + i;
3586 if (!(dev_spec->shadow_ram[offset + i].modified) ||
3587 !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3588 ret_val =
3589 e1000_read_flash_dword_ich8lan(hw,
3590 offset_to_read,
3591 &dword);
3592 if (ret_val)
3593 break;
3594 }
3595 if (dev_spec->shadow_ram[offset + i].modified)
3596 data[i] =
3597 dev_spec->shadow_ram[offset + i].value;
3598 else
3599 data[i] = (u16)(dword & 0xFFFF);
3600 if (dev_spec->shadow_ram[offset + i + 1].modified)
3601 data[i + 1] =
3602 dev_spec->shadow_ram[offset + i + 1].value;
3603 else
3604 data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3605 }
3606 }
3607
3608 nvm->ops.release(hw);
3609
3610 out:
3611 if (ret_val)
3612 DEBUGOUT1("NVM read error: %d\n", ret_val);
3613
3614 return ret_val;
3615 }
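
/* Hedged illustration of the even/odd handling above: SPT flash reads are
 * dword-wide, so a word at an even word offset lives in the low half of
 * the containing dword and a word at an odd offset in the high half. A
 * hypothetical helper making the selection explicit:
 */
static u16 e1000_word_from_flash_dword(u32 dword, u32 word_offset)
{
	return (word_offset % 2) ? (u16)((dword >> 16) & 0xFFFF)
				 : (u16)(dword & 0xFFFF);
}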
3616
3617 /**
3618 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3619 * @hw: pointer to the HW structure
3620 * @offset: The offset (in words) of the word(s) to read.
3621 * @words: Size of data to read in words
3622 * @data: Pointer to the word(s) to read at offset.
3623 *
3624 * Reads a word(s) from the NVM using the flash access registers.
3625 **/
3626 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3627 u16 *data)
3628 {
3629 struct e1000_nvm_info *nvm = &hw->nvm;
3630 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3631 u32 act_offset;
3632 s32 ret_val = E1000_SUCCESS;
3633 u32 bank = 0;
3634 u16 i, word;
3635
3636 DEBUGFUNC("e1000_read_nvm_ich8lan");
3637
3638 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3639 (words == 0)) {
3640 DEBUGOUT("nvm parameter(s) out of bounds\n");
3641 ret_val = -E1000_ERR_NVM;
3642 goto out;
3643 }
3644
3645 nvm->ops.acquire(hw);
3646
3647 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3648 if (ret_val != E1000_SUCCESS) {
3649 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3650 bank = 0;
3651 }
3652
3653 act_offset = (bank) ? nvm->flash_bank_size : 0;
3654 act_offset += offset;
3655
3656 ret_val = E1000_SUCCESS;
3657 for (i = 0; i < words; i++) {
3658 if (dev_spec->shadow_ram[offset + i].modified) {
3659 data[i] = dev_spec->shadow_ram[offset + i].value;
3660 } else {
3661 ret_val = e1000_read_flash_word_ich8lan(hw,
3662 act_offset + i,
3663 &word);
3664 if (ret_val)
3665 break;
3666 data[i] = word;
3667 }
3668 }
3669
3670 nvm->ops.release(hw);
3671
3672 out:
3673 if (ret_val)
3674 DEBUGOUT1("NVM read error: %d\n", ret_val);
3675
3676 return ret_val;
3677 }
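
/* Sketch of typical use through the ops table (hedged; assumes hw has been
 * initialized so that nvm->ops.read points at one of the readers above):
 *
 *	u16 led_cfg;
 *	s32 err = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, &led_cfg);
 *	if (err)
 *		return err;
 */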
3678
3679 /**
3680 * e1000_flash_cycle_init_ich8lan - Initialize flash
3681 * @hw: pointer to the HW structure
3682 *
3683 * This function does initial flash setup so that a new read/write/erase cycle
3684 * can be started.
3685 **/
3686 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3687 {
3688 union ich8_hws_flash_status hsfsts;
3689 s32 ret_val = -E1000_ERR_NVM;
3690
3691 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3692
3693 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3694
3695 /* Check if the flash descriptor is valid */
3696 if (!hsfsts.hsf_status.fldesvalid) {
3697 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3698 return -E1000_ERR_NVM;
3699 }
3700
3701 /* Clear FCERR and DAEL in hw status by writing 1 */
3702 hsfsts.hsf_status.flcerr = 1;
3703 hsfsts.hsf_status.dael = 1;
3704 if (hw->mac.type >= e1000_pch_spt)
3705 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3706 hsfsts.regval & 0xFFFF);
3707 else
3708 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3709
3710 /* Either the hardware should provide a cycle-in-progress
3711 * bit to check against before starting a new cycle, or the
3712 * FDONE bit should be set by the hardware so that it reads
3713 * as 1 after a hardware reset, which can then be used as an
3714 * indication of whether a cycle is in progress or has
3715 * completed.
3716 */
3717
3718 if (!hsfsts.hsf_status.flcinprog) {
3719 /* There is no cycle running at present,
3720 * so we can start a cycle.
3721 * Begin by setting Flash Cycle Done.
3722 */
3723 hsfsts.hsf_status.flcdone = 1;
3724 if (hw->mac.type >= e1000_pch_spt)
3725 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3726 hsfsts.regval & 0xFFFF);
3727 else
3728 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3729 hsfsts.regval);
3730 ret_val = E1000_SUCCESS;
3731 } else {
3732 s32 i;
3733
3734 /* Otherwise poll for some time so the current
3735 * cycle has a chance to end before giving up.
3736 */
3737 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3738 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3739 ICH_FLASH_HSFSTS);
3740 if (!hsfsts.hsf_status.flcinprog) {
3741 ret_val = E1000_SUCCESS;
3742 break;
3743 }
3744 usec_delay(1);
3745 }
3746 if (ret_val == E1000_SUCCESS) {
3747 /* The previous cycle completed within the timeout,
3748 * so now set the Flash Cycle Done.
3749 */
3750 hsfsts.hsf_status.flcdone = 1;
3751 if (hw->mac.type >= e1000_pch_spt)
3752 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3753 hsfsts.regval & 0xFFFF);
3754 else
3755 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3756 hsfsts.regval);
3757 } else {
3758 DEBUGOUT("Flash controller busy, cannot get access\n");
3759 }
3760 }
3761
3762 return ret_val;
3763 }
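
/* Note (hedged): FCERR and DAEL in HSFSTS are write-1-to-clear bits, which
 * is why the sequence above reads HSFSTS, sets those bits in the local
 * copy, and writes the value back -- the write clears the error flags
 * rather than setting them.
 */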
3764
3765 /**
3766 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3767 * @hw: pointer to the HW structure
3768 * @timeout: maximum time to wait for completion
3769 *
3770 * This function starts a flash cycle and waits for its completion.
3771 **/
3772 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3773 {
3774 union ich8_hws_flash_ctrl hsflctl;
3775 union ich8_hws_flash_status hsfsts;
3776 u32 i = 0;
3777
3778 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3779
3780 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3781 if (hw->mac.type >= e1000_pch_spt)
3782 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3783 else
3784 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3785 hsflctl.hsf_ctrl.flcgo = 1;
3786
3787 if (hw->mac.type >= e1000_pch_spt)
3788 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3789 hsflctl.regval << 16);
3790 else
3791 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3792
3793 /* wait till FDONE bit is set to 1 */
3794 do {
3795 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3796 if (hsfsts.hsf_status.flcdone)
3797 break;
3798 usec_delay(1);
3799 } while (i++ < timeout);
3800
3801 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3802 return E1000_SUCCESS;
3803
3804 return -E1000_ERR_NVM;
3805 }
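
/* Note (hedged): a cycle is treated as successful above only when FLCDONE
 * is set and FLCERR is clear; FLCDONE together with FLCERR means the cycle
 * ended in error, and the callers' retry loops clear the error and try the
 * sequence again.
 */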
3806
3807 /**
3808 * e1000_read_flash_dword_ich8lan - Read dword from flash
3809 * @hw: pointer to the HW structure
3810 * @offset: offset to data location
3811 * @data: pointer to the location for storing the data
3812 *
3813 * Reads the flash dword at offset into data. Offset is converted
3814 * to bytes before read.
3815 **/
3816 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3817 u32 *data)
3818 {
3819 DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3820
3821 if (!data)
3822 return -E1000_ERR_NVM;
3823
3824 /* Must convert word offset into bytes. */
3825 offset <<= 1;
3826
3827 return e1000_read_flash_data32_ich8lan(hw, offset, data);
3828 }
3829
3830 /**
3831 * e1000_read_flash_word_ich8lan - Read word from flash
3832 * @hw: pointer to the HW structure
3833 * @offset: offset to data location
3834 * @data: pointer to the location for storing the data
3835 *
3836 * Reads the flash word at offset into data. Offset is converted
3837 * to bytes before read.
3838 **/
3839 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3840 u16 *data)
3841 {
3842 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3843
3844 if (!data)
3845 return -E1000_ERR_NVM;
3846
3847 /* Must convert offset into bytes. */
3848 offset <<= 1;
3849
3850 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3851 }
3852
3853 /**
3854 * e1000_read_flash_byte_ich8lan - Read byte from flash
3855 * @hw: pointer to the HW structure
3856 * @offset: The offset of the byte to read.
3857 * @data: Pointer to a byte to store the value read.
3858 *
3859 * Reads a single byte from the NVM using the flash access registers.
3860 **/
3861 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3862 u8 *data)
3863 {
3864 s32 ret_val;
3865 u16 word = 0;
3866
3867 /* In SPT, only 32-bit access is supported,
3868 * so this function should not be called.
3869 */
3870 if (hw->mac.type >= e1000_pch_spt)
3871 return -E1000_ERR_NVM;
3872 else
3873 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3874
3875 if (ret_val)
3876 return ret_val;
3877
3878 *data = (u8)word;
3879
3880 return E1000_SUCCESS;
3881 }
3882
3883 /**
3884 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3885 * @hw: pointer to the HW structure
3886 * @offset: The offset (in bytes) of the byte or word to read.
3887 * @size: Size of data to read, 1=byte 2=word
3888 * @data: Pointer to the word to store the value read.
3889 *
3890 * Reads a byte or word from the NVM using the flash access registers.
3891 **/
3892 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3893 u8 size, u16 *data)
3894 {
3895 union ich8_hws_flash_status hsfsts;
3896 union ich8_hws_flash_ctrl hsflctl;
3897 u32 flash_linear_addr;
3898 u32 flash_data = 0;
3899 s32 ret_val = -E1000_ERR_NVM;
3900 u8 count = 0;
3901
3902 DEBUGFUNC("e1000_read_flash_data_ich8lan");
3903
3904 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3905 return -E1000_ERR_NVM;
3906 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3907 hw->nvm.flash_base_addr);
3908
3909 do {
3910 usec_delay(1);
3911 /* Steps */
3912 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3913 if (ret_val != E1000_SUCCESS)
3914 break;
3915 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3916
3917 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3918 hsflctl.hsf_ctrl.fldbcount = size - 1;
3919 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3920 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3921 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3922
3923 ret_val = e1000_flash_cycle_ich8lan(hw,
3924 ICH_FLASH_READ_COMMAND_TIMEOUT);
3925
3926 /* If FCERR is set to 1, clear it and try the whole
3927 * sequence a few more times; otherwise read in the
3928 * Flash Data0 register, which returns the data
3929 * least-significant byte first.
3930 */
3931 if (ret_val == E1000_SUCCESS) {
3932 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3933 if (size == 1)
3934 *data = (u8)(flash_data & 0x000000FF);
3935 else if (size == 2)
3936 *data = (u16)(flash_data & 0x0000FFFF);
3937 break;
3938 } else {
3939 /* If we've gotten here, then things are probably
3940 * completely hosed, but if the error condition is
3941 * detected, it won't hurt to give it another try...
3942 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3943 */
3944 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3945 ICH_FLASH_HSFSTS);
3946 if (hsfsts.hsf_status.flcerr) {
3947 /* Repeat for some time before giving up. */
3948 continue;
3949 } else if (!hsfsts.hsf_status.flcdone) {
3950 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3951 break;
3952 }
3953 }
3954 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3955
3956 return ret_val;
3957 }
3958
3959 /**
3960 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3961 * @hw: pointer to the HW structure
3962 * @offset: The offset (in bytes) of the dword to read.
3963 * @data: Pointer to the dword to store the value read.
3964 *
3965 * Reads a dword from the NVM using the flash access registers.
3966 **/
3967 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3968 u32 *data)
3969 {
3970 union ich8_hws_flash_status hsfsts;
3971 union ich8_hws_flash_ctrl hsflctl;
3972 u32 flash_linear_addr;
3973 s32 ret_val = -E1000_ERR_NVM;
3974 u8 count = 0;
3975
3976 DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3977
3978 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3979 hw->mac.type < e1000_pch_spt)
3980 return -E1000_ERR_NVM;
3981 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3982 hw->nvm.flash_base_addr);
3983
3984 do {
3985 usec_delay(1);
3986 /* Steps */
3987 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3988 if (ret_val != E1000_SUCCESS)
3989 break;
3990 /* In SPT, this register is in the LAN memory space, not flash.
3991 * Therefore, only 32-bit access is supported.
3992 */
3993 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3994
3995 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3996 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3997 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3998 /* In SPT, this register is in the LAN memory space, not flash.
3999 * Therefore, only 32-bit access is supported.
4000 */
4001 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4002 (u32)hsflctl.regval << 16);
4003 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4004
4005 ret_val = e1000_flash_cycle_ich8lan(hw,
4006 ICH_FLASH_READ_COMMAND_TIMEOUT);
4007
4008 /* If FCERR is set to 1, clear it and try the whole
4009 * sequence a few more times; otherwise read in the
4010 * Flash Data0 register, which returns the data
4011 * least-significant byte first.
4012 */
4013 if (ret_val == E1000_SUCCESS) {
4014 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
4015 break;
4016 } else {
4017 /* If we've gotten here, then things are probably
4018 * completely hosed, but if the error condition is
4019 * detected, it won't hurt to give it another try...
4020 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
4021 */
4022 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4023 ICH_FLASH_HSFSTS);
4024 if (hsfsts.hsf_status.flcerr) {
4025 /* Repeat for some time before giving up. */
4026 continue;
4027 } else if (!hsfsts.hsf_status.flcdone) {
4028 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4029 break;
4030 }
4031 }
4032 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4033
4034 return ret_val;
4035 }
4036
4037 /**
4038 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
4039 * @hw: pointer to the HW structure
4040 * @offset: The offset (in words) of the word(s) to write.
4041 * @words: Size of data to write in words
4042 * @data: Pointer to the word(s) to write at offset.
4043 *
4044 * Stages the word(s) in the shadow RAM to be committed by the NVM checksum update.
4045 **/
4046 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4047 u16 *data)
4048 {
4049 struct e1000_nvm_info *nvm = &hw->nvm;
4050 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4051 u16 i;
4052
4053 DEBUGFUNC("e1000_write_nvm_ich8lan");
4054
4055 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4056 (words == 0)) {
4057 DEBUGOUT("nvm parameter(s) out of bounds\n");
4058 return -E1000_ERR_NVM;
4059 }
4060
4061 nvm->ops.acquire(hw);
4062
4063 for (i = 0; i < words; i++) {
4064 dev_spec->shadow_ram[offset + i].modified = true;
4065 dev_spec->shadow_ram[offset + i].value = data[i];
4066 }
4067
4068 nvm->ops.release(hw);
4069
4070 return E1000_SUCCESS;
4071 }
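
/* Hedged usage sketch: writes are only staged in the shadow RAM here; they
 * reach the flash when a checksum update commits the opposite bank, as the
 * validate routine below does:
 *
 *	hw->nvm.ops.write(hw, word, 1, &data);
 *	hw->nvm.ops.update(hw);
 */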
4072
4073 /**
4074 * e1000_update_nvm_checksum_spt - Update the checksum for NVM
4075 * @hw: pointer to the HW structure
4076 *
4077 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4078 * which writes the checksum to the shadow ram. The changes in the shadow
4079 * ram are then committed to the EEPROM by processing one bank at a time,
4080 * checking for the modified bit and writing only the pending changes.
4081 * After a successful commit, the shadow ram is cleared and is ready for
4082 * future writes.
4083 **/
4084 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4085 {
4086 struct e1000_nvm_info *nvm = &hw->nvm;
4087 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4088 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4089 s32 ret_val;
4090 u32 dword = 0;
4091
4092 DEBUGFUNC("e1000_update_nvm_checksum_spt");
4093
4094 ret_val = e1000_update_nvm_checksum_generic(hw);
4095 if (ret_val)
4096 goto out;
4097
4098 if (nvm->type != e1000_nvm_flash_sw)
4099 goto out;
4100
4101 nvm->ops.acquire(hw);
4102
4103 /* We're writing to the opposite bank so if we're on bank 1,
4104 * write to bank 0 etc. We also need to erase the segment that
4105 * is going to be written
4106 */
4107 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4108 if (ret_val != E1000_SUCCESS) {
4109 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4110 bank = 0;
4111 }
4112
4113 if (bank == 0) {
4114 new_bank_offset = nvm->flash_bank_size;
4115 old_bank_offset = 0;
4116 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4117 if (ret_val)
4118 goto release;
4119 } else {
4120 old_bank_offset = nvm->flash_bank_size;
4121 new_bank_offset = 0;
4122 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4123 if (ret_val)
4124 goto release;
4125 }
4126 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4127 /* Determine whether to write the value stored
4128 * in the other NVM bank or a modified value stored
4129 * in the shadow RAM
4130 */
4131 ret_val = e1000_read_flash_dword_ich8lan(hw,
4132 i + old_bank_offset,
4133 &dword);
4134
4135 if (dev_spec->shadow_ram[i].modified) {
4136 dword &= 0xffff0000;
4137 dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4138 }
4139 if (dev_spec->shadow_ram[i + 1].modified) {
4140 dword &= 0x0000ffff;
4141 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4142 << 16);
4143 }
4144 if (ret_val)
4145 break;
4146
4147 /* If the word is 0x13, then make sure the signature bits
4148 * (15:14) are 11b until the commit has completed.
4149 * This will allow us to write 10b which indicates the
4150 * signature is valid. We want to do this after the write
4151 * has completed so that we don't mark the segment valid
4152 * while the write is still in progress
4153 */
4154 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4155 dword |= E1000_ICH_NVM_SIG_MASK << 16;
4156
4157 /* Offset of the dword in the new bank, in words. The
4158 * dword write path below converts the word offset to
4159 * bytes itself, so no byte conversion is needed here.
4160 */
4161 act_offset = i + new_bank_offset;
4162 
4163 usec_delay(100);
4164 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4165 dword);
4166 if (ret_val)
4167 break;
4168 }
4169
4170 /* Don't bother writing the segment valid bits if sector
4171 * programming failed.
4172 */
4173 if (ret_val) {
4174 DEBUGOUT("Flash commit failed.\n");
4175 goto release;
4176 }
4177
4178 /* Finally validate the new segment by setting bits 15:14
4179 * to 10b in word 0x13. This can be done without an
4180 * erase as well, since these bits start out as 11b and we
4181 * only need to clear bit 14.
4182 */
4183 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4184
4185 /* Offset in words, but we read the whole dword containing the signature word. */
4186 --act_offset;
4187 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4188
4189 if (ret_val)
4190 goto release;
4191
4192 dword &= 0xBFFFFFFF;
4193 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4194
4195 if (ret_val)
4196 goto release;
4197
4198 /* Offset in words, but we read the whole dword containing the signature word. */
4199 act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4200 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4201
4202 if (ret_val)
4203 goto release;
4204
4205 dword &= 0x00FFFFFF;
4206 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4207
4208 if (ret_val)
4209 goto release;
4210
4211 /* Great! Everything worked, we can now clear the cached entries. */
4212 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4213 dev_spec->shadow_ram[i].modified = false;
4214 dev_spec->shadow_ram[i].value = 0xFFFF;
4215 }
4216
4217 release:
4218 nvm->ops.release(hw);
4219
4220 /* Reload the EEPROM, or else modifications will not appear
4221 * until after the next adapter reset.
4222 */
4223 if (!ret_val) {
4224 nvm->ops.reload(hw);
4225 msec_delay(10);
4226 }
4227
4228 out:
4229 if (ret_val)
4230 DEBUGOUT1("NVM update error: %d\n", ret_val);
4231
4232 return ret_val;
4233 }
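
/* Hedged illustration of the two signature masks above: word 0x13 sits in
 * the high half of the dword read at (E1000_ICH_NVM_SIG_WORD - 1), so
 * clearing bit 14 of the word means clearing bit 30 of the dword:
 *
 *	dword &= 0xBFFFFFFF;	leaves 10b in bits 31:30, marking the
 *				new bank valid
 *	dword &= 0x00FFFFFF;	zeroes the old bank's signature byte
 */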
4234
4235 /**
4236 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4237 * @hw: pointer to the HW structure
4238 *
4239 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4240 * which writes the checksum to the shadow ram. The changes in the shadow
4241 * ram are then committed to the EEPROM by processing one bank at a time,
4242 * checking for the modified bit and writing only the pending changes.
4243 * After a successful commit, the shadow ram is cleared and is ready for
4244 * future writes.
4245 **/
4246 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4247 {
4248 struct e1000_nvm_info *nvm = &hw->nvm;
4249 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4250 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4251 s32 ret_val;
4252 u16 data = 0;
4253
4254 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4255
4256 ret_val = e1000_update_nvm_checksum_generic(hw);
4257 if (ret_val)
4258 goto out;
4259
4260 if (nvm->type != e1000_nvm_flash_sw)
4261 goto out;
4262
4263 nvm->ops.acquire(hw);
4264
4265 /* We're writing to the opposite bank so if we're on bank 1,
4266 * write to bank 0 etc. We also need to erase the segment that
4267 * is going to be written
4268 */
4269 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4270 if (ret_val != E1000_SUCCESS) {
4271 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4272 bank = 0;
4273 }
4274
4275 if (bank == 0) {
4276 new_bank_offset = nvm->flash_bank_size;
4277 old_bank_offset = 0;
4278 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4279 if (ret_val)
4280 goto release;
4281 } else {
4282 old_bank_offset = nvm->flash_bank_size;
4283 new_bank_offset = 0;
4284 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4285 if (ret_val)
4286 goto release;
4287 }
4288 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4289 if (dev_spec->shadow_ram[i].modified) {
4290 data = dev_spec->shadow_ram[i].value;
4291 } else {
4292 ret_val = e1000_read_flash_word_ich8lan(hw, i +
4293 old_bank_offset,
4294 &data);
4295 if (ret_val)
4296 break;
4297 }
4298 /* If the word is 0x13, then make sure the signature bits
4299 * (15:14) are 11b until the commit has completed.
4300 * This will allow us to write 10b which indicates the
4301 * signature is valid. We want to do this after the write
4302 * has completed so that we don't mark the segment valid
4303 * while the write is still in progress
4304 */
4305 if (i == E1000_ICH_NVM_SIG_WORD)
4306 data |= E1000_ICH_NVM_SIG_MASK;
4307
4308 /* Convert offset to bytes. */
4309 act_offset = (i + new_bank_offset) << 1;
4310
4311 usec_delay(100);
4312
4313 /* Write the bytes to the new bank. */
4314 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4315 act_offset,
4316 (u8)data);
4317 if (ret_val)
4318 break;
4319
4320 usec_delay(100);
4321 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4322 act_offset + 1,
4323 (u8)(data >> 8));
4324 if (ret_val)
4325 break;
4326 }
4327
4328 /* Don't bother writing the segment valid bits if sector
4329 * programming failed.
4330 */
4331 if (ret_val) {
4332 DEBUGOUT("Flash commit failed.\n");
4333 goto release;
4334 }
4335
4336 /* Finally validate the new segment by setting bits 15:14
4337 * to 10b in word 0x13. This can be done without an
4338 * erase as well, since these bits start out as 11b and we
4339 * only need to clear bit 14.
4340 */
4341 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4342 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4343 if (ret_val)
4344 goto release;
4345
4346 data &= 0xBFFF;
4347 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4348 (u8)(data >> 8));
4349 if (ret_val)
4350 goto release;
4351
4352 /* And invalidate the previously valid segment by setting
4353 * the high byte of its signature word (0x13) to 0. This can
4354 * be done without an erase because flash erase sets all bits
4355 * to 1, and bits can be written from 1 to 0 without an erase.
4356 */
4357 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4358
4359 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4360
4361 if (ret_val)
4362 goto release;
4363
4364 /* Great! Everything worked, we can now clear the cached entries. */
4365 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4366 dev_spec->shadow_ram[i].modified = false;
4367 dev_spec->shadow_ram[i].value = 0xFFFF;
4368 }
4369
4370 release:
4371 nvm->ops.release(hw);
4372
4373 /* Reload the EEPROM, or else modifications will not appear
4374 * until after the next adapter reset.
4375 */
4376 if (!ret_val) {
4377 nvm->ops.reload(hw);
4378 msec_delay(10);
4379 }
4380
4381 out:
4382 if (ret_val)
4383 DEBUGOUT1("NVM update error: %d\n", ret_val);
4384
4385 return ret_val;
4386 }
4387
4388 /**
4389 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4390 * @hw: pointer to the HW structure
4391 *
4392 * Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
4393 * If the bit is 0, the EEPROM has been modified but the checksum was not
4394 * recalculated, in which case we need to calculate the checksum and set bit 6.
4395 **/
4396 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4397 {
4398 s32 ret_val;
4399 u16 data;
4400 u16 word;
4401 u16 valid_csum_mask;
4402
4403 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4404
4405 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4406 * the checksum needs to be fixed. This bit is an indication that
4407 * the NVM was prepared by OEM software and did not calculate
4408 * the checksum...a likely scenario.
4409 */
4410 switch (hw->mac.type) {
4411 case e1000_pch_lpt:
4412 case e1000_pch_spt:
4413 case e1000_pch_cnp:
4414 case e1000_pch_tgp:
4415 case e1000_pch_adp:
4416 case e1000_pch_mtp:
4417 case e1000_pch_ptp:
4418 word = NVM_COMPAT;
4419 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4420 break;
4421 default:
4422 word = NVM_FUTURE_INIT_WORD1;
4423 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4424 break;
4425 }
4426
4427 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4428 if (ret_val)
4429 return ret_val;
4430
4431 if (!(data & valid_csum_mask)) {
4432 data |= valid_csum_mask;
4433 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4434 if (ret_val)
4435 return ret_val;
4436 ret_val = hw->nvm.ops.update(hw);
4437 if (ret_val)
4438 return ret_val;
4439 }
4440
4441 return e1000_validate_nvm_checksum_generic(hw);
4442 }
4443
4444 /**
4445 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4446 * @hw: pointer to the HW structure
4447 * @offset: The offset (in bytes) of the byte/word to write.
4448 * @size: Size of data to write, 1=byte 2=word
4449 * @data: The byte(s) to write to the NVM.
4450 *
4451 * Writes one/two bytes to the NVM using the flash access registers.
4452 **/
4453 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4454 u8 size, u16 data)
4455 {
4456 union ich8_hws_flash_status hsfsts;
4457 union ich8_hws_flash_ctrl hsflctl;
4458 u32 flash_linear_addr;
4459 u32 flash_data = 0;
4460 s32 ret_val;
4461 u8 count = 0;
4462
4463 DEBUGFUNC("e1000_write_flash_data_ich8lan");
4464
4465 if (hw->mac.type >= e1000_pch_spt) {
4466 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4467 return -E1000_ERR_NVM;
4468 } else {
4469 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4470 return -E1000_ERR_NVM;
4471 }
4472
4473 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4474 hw->nvm.flash_base_addr);
4475
4476 do {
4477 usec_delay(1);
4478 /* Steps */
4479 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4480 if (ret_val != E1000_SUCCESS)
4481 break;
4482 /* In SPT, this register is in the LAN memory space, not
4483 * flash. Therefore, only 32-bit access is supported.
4484 */
4485 if (hw->mac.type >= e1000_pch_spt)
4486 hsflctl.regval =
4487 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4488 else
4489 hsflctl.regval =
4490 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4491
4492 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4493 hsflctl.hsf_ctrl.fldbcount = size - 1;
4494 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4495 /* In SPT, this register is in the LAN memory space,
4496 * not flash. Therefore, only 32-bit access is
4497 * supported.
4498 */
4499 if (hw->mac.type >= e1000_pch_spt)
4500 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4501 hsflctl.regval << 16);
4502 else
4503 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4504 hsflctl.regval);
4505
4506 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4507
4508 if (size == 1)
4509 flash_data = (u32)data & 0x00FF;
4510 else
4511 flash_data = (u32)data;
4512
4513 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4514
4515 /* check if FCERR is set to 1 , if set to 1, clear it
4516 * and try the whole sequence a few more times else done
4517 */
4518 ret_val =
4519 e1000_flash_cycle_ich8lan(hw,
4520 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4521 if (ret_val == E1000_SUCCESS)
4522 break;
4523
4524 /* If we're here, then things are most likely
4525 * completely hosed, but if the error condition
4526 * is detected, it won't hurt to give it another
4527 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4528 */
4529 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4530 if (hsfsts.hsf_status.flcerr)
4531 /* Repeat for some time before giving up. */
4532 continue;
4533 if (!hsfsts.hsf_status.flcdone) {
4534 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4535 break;
4536 }
4537 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4538
4539 return ret_val;
4540 }
4541
4542 /**
4543 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4544 * @hw: pointer to the HW structure
4545 * @offset: The offset (in bytes) of the dword to write.
4546 * @data: The 4 bytes to write to the NVM.
4547 *
4548 * Writes a dword (4 bytes) to the NVM using the flash access registers.
4549 **/
4550 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4551 u32 data)
4552 {
4553 union ich8_hws_flash_status hsfsts;
4554 union ich8_hws_flash_ctrl hsflctl;
4555 u32 flash_linear_addr;
4556 s32 ret_val;
4557 u8 count = 0;
4558
4559 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4560
4561 if (hw->mac.type >= e1000_pch_spt) {
4562 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4563 return -E1000_ERR_NVM;
4564 }
4565 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4566 hw->nvm.flash_base_addr);
4567 do {
4568 usec_delay(1);
4569 /* Steps */
4570 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4571 if (ret_val != E1000_SUCCESS)
4572 break;
4573
4574 /* In SPT, this register is in the LAN memory space, not
4575 * flash. Therefore, only 32-bit access is supported.
4576 */
4577 if (hw->mac.type >= e1000_pch_spt)
4578 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4579 ICH_FLASH_HSFSTS)
4580 >> 16;
4581 else
4582 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4583 ICH_FLASH_HSFCTL);
4584
4585 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4586 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4587
4588 /* In SPT, this register is in the LAN memory space,
4589 * not flash. Therefore, only 32-bit access is
4590 * supported.
4591 */
4592 if (hw->mac.type >= e1000_pch_spt)
4593 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4594 hsflctl.regval << 16);
4595 else
4596 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4597 hsflctl.regval);
4598
4599 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4600
4601 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4602
4603 /* check if FCERR is set to 1 , if set to 1, clear it
4604 * and try the whole sequence a few more times else done
4605 */
4606 ret_val = e1000_flash_cycle_ich8lan(hw,
4607 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4608
4609 if (ret_val == E1000_SUCCESS)
4610 break;
4611
4612 /* If we're here, then things are most likely
4613 * completely hosed, but if the error condition
4614 * is detected, it won't hurt to give it another
4615 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4616 */
4617 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4618
4619 if (hsfsts.hsf_status.flcerr)
4620 /* Repeat for some time before giving up. */
4621 continue;
4622 if (!hsfsts.hsf_status.flcdone) {
4623 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4624 break;
4625 }
4626 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4627
4628 return ret_val;
4629 }
4630
4631 /**
4632 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4633 * @hw: pointer to the HW structure
4634 * @offset: The index of the byte to write.
4635 * @data: The byte to write to the NVM.
4636 *
4637 * Writes a single byte to the NVM using the flash access registers.
4638 **/
4639 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4640 u8 data)
4641 {
4642 u16 word = (u16)data;
4643
4644 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4645
4646 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4647 }
4648
4649 /**
4650 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4651 * @hw: pointer to the HW structure
4652 * @offset: The word offset of the dword to write.
4653 * @dword: The dword to write to the NVM.
4654 *
4655 * Writes a single dword to the NVM using the flash access registers.
4656 * Goes through a retry algorithm before giving up.
4657 **/
4658 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4659 u32 offset, u32 dword)
4660 {
4661 s32 ret_val;
4662 u16 program_retries;
4663
4664 DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4665
4666 /* Must convert word offset into bytes. */
4667 offset <<= 1;
4668
4669 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4670
4671 if (!ret_val)
4672 return ret_val;
4673 for (program_retries = 0; program_retries < 100; program_retries++) {
4674 DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4675 usec_delay(100);
4676 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4677 if (ret_val == E1000_SUCCESS)
4678 break;
4679 }
4680 if (program_retries == 100)
4681 return -E1000_ERR_NVM;
4682
4683 return E1000_SUCCESS;
4684 }
4685
4686 /**
4687 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4688 * @hw: pointer to the HW structure
4689 * @offset: The offset of the byte to write.
4690 * @byte: The byte to write to the NVM.
4691 *
4692 * Writes a single byte to the NVM using the flash access registers.
4693 * Goes through a retry algorithm before giving up.
4694 **/
4695 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4696 u32 offset, u8 byte)
4697 {
4698 s32 ret_val;
4699 u16 program_retries;
4700
4701 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4702
4703 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4704 if (!ret_val)
4705 return ret_val;
4706
4707 for (program_retries = 0; program_retries < 100; program_retries++) {
4708 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4709 usec_delay(100);
4710 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4711 if (ret_val == E1000_SUCCESS)
4712 break;
4713 }
4714 if (program_retries == 100)
4715 return -E1000_ERR_NVM;
4716
4717 return E1000_SUCCESS;
4718 }
4719
4720 /**
4721 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4722 * @hw: pointer to the HW structure
4723 * @bank: 0 for first bank, 1 for second bank, etc.
4724 *
4725 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4726 * bank N is 4096 * N + flash_reg_addr.
4727 **/
4728 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4729 {
4730 struct e1000_nvm_info *nvm = &hw->nvm;
4731 union ich8_hws_flash_status hsfsts;
4732 union ich8_hws_flash_ctrl hsflctl;
4733 u32 flash_linear_addr;
4734 /* bank size is in 16bit words - adjust to bytes */
4735 u32 flash_bank_size = nvm->flash_bank_size * 2;
4736 s32 ret_val;
4737 s32 count = 0;
4738 s32 j, iteration, sector_size;
4739
4740 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4741
4742 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4743
4744 /* Determine HW Sector size: Read BERASE bits of hw flash status
4745 * register
4746 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4747 * consecutive sectors. The start index for the nth Hw sector
4748 * can be calculated as bank * 4096 + n * 256
4749 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4750 * The start index for the nth Hw sector can be calculated
4751 * as bank * 4096
4752 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4753 * (ich9 only, otherwise error condition)
4754 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4755 */
4756 switch (hsfsts.hsf_status.berasesz) {
4757 case 0:
4758 /* Hw sector size 256 */
4759 sector_size = ICH_FLASH_SEG_SIZE_256;
4760 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4761 break;
4762 case 1:
4763 sector_size = ICH_FLASH_SEG_SIZE_4K;
4764 iteration = 1;
4765 break;
4766 case 2:
4767 sector_size = ICH_FLASH_SEG_SIZE_8K;
4768 iteration = 1;
4769 break;
4770 case 3:
4771 sector_size = ICH_FLASH_SEG_SIZE_64K;
4772 iteration = 1;
4773 break;
4774 default:
4775 return -E1000_ERR_NVM;
4776 }
4777
4778 /* Start with the base address, then add the sector offset. */
4779 flash_linear_addr = hw->nvm.flash_base_addr;
4780 flash_linear_addr += (bank) ? flash_bank_size : 0;
4781
4782 for (j = 0; j < iteration; j++) {
4783 do {
4784 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4785
4786 /* Steps */
4787 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4788 if (ret_val)
4789 return ret_val;
4790
4791 /* Write a value 11 (block Erase) in Flash
4792 * Cycle field in hw flash control
4793 */
4794 if (hw->mac.type >= e1000_pch_spt)
4795 hsflctl.regval =
4796 E1000_READ_FLASH_REG(hw,
4797 ICH_FLASH_HSFSTS)>>16;
4798 else
4799 hsflctl.regval =
4800 E1000_READ_FLASH_REG16(hw,
4801 ICH_FLASH_HSFCTL);
4802
4803 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4804 if (hw->mac.type >= e1000_pch_spt)
4805 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4806 hsflctl.regval << 16);
4807 else
4808 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4809 hsflctl.regval);
4810
4811 /* Write the last 24 bits of an index within the
4812 * block into Flash Linear address field in Flash
4813 * Address.
4814 */
4815 flash_linear_addr += (j * sector_size);
4816 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4817 flash_linear_addr);
4818
4819 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4820 if (ret_val == E1000_SUCCESS)
4821 break;
4822
4823 /* Check if FCERR is set to 1. If 1,
4824 * clear it and try the whole sequence
4825 * a few more times else Done
4826 */
4827 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4828 ICH_FLASH_HSFSTS);
4829 if (hsfsts.hsf_status.flcerr)
4830 /* repeat for some time before giving up */
4831 continue;
4832 else if (!hsfsts.hsf_status.flcdone)
4833 return ret_val;
4834 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4835 }
4836
4837 return E1000_SUCCESS;
4838 }
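
/* Hedged sketch of the addressing above: with 4 KB sectors (BERASE == 01b)
 * bank N starts at flash_base_addr + N * flash_bank_size bytes and is
 * erased in a single ICH_CYCLE_ERASE operation; with 256-byte sectors the
 * same range takes flash_bank_size / 256 consecutive erase cycles.
 */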
4839
4840 /**
4841 * e1000_valid_led_default_ich8lan - Set the default LED settings
4842 * @hw: pointer to the HW structure
4843 * @data: Pointer to the LED settings
4844 *
4845 * Reads the LED default settings from the NVM to data. If the NVM LED
4846 * settings are all 0's or F's, set the LED default to a valid LED default
4847 * setting.
4848 **/
4849 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4850 {
4851 s32 ret_val;
4852
4853 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4854
4855 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4856 if (ret_val) {
4857 DEBUGOUT("NVM Read Error\n");
4858 return ret_val;
4859 }
4860
4861 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4862 *data = ID_LED_DEFAULT_ICH8LAN;
4863
4864 return E1000_SUCCESS;
4865 }
4866
4867 /**
4868 * e1000_id_led_init_pchlan - store LED configurations
4869 * @hw: pointer to the HW structure
4870 *
4871 * PCH does not control LEDs via the LEDCTL register, rather it uses
4872 * the PHY LED configuration register.
4873 *
4874 * PCH also does not have an "always on" or "always off" mode which
4875 * complicates the ID feature. Instead of using the "on" mode to indicate
4876 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4877 * use "link_up" mode. The LEDs will still ID on request if there is no
4878 * link based on logic in e1000_led_[on|off]_pchlan().
4879 **/
4880 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4881 {
4882 struct e1000_mac_info *mac = &hw->mac;
4883 s32 ret_val;
4884 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4885 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4886 u16 data, i, temp, shift;
4887
4888 DEBUGFUNC("e1000_id_led_init_pchlan");
4889
4890 /* Get default ID LED modes */
4891 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4892 if (ret_val)
4893 return ret_val;
4894
4895 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4896 mac->ledctl_mode1 = mac->ledctl_default;
4897 mac->ledctl_mode2 = mac->ledctl_default;
4898
4899 for (i = 0; i < 4; i++) {
4900 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4901 shift = (i * 5);
4902 switch (temp) {
4903 case ID_LED_ON1_DEF2:
4904 case ID_LED_ON1_ON2:
4905 case ID_LED_ON1_OFF2:
4906 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4907 mac->ledctl_mode1 |= (ledctl_on << shift);
4908 break;
4909 case ID_LED_OFF1_DEF2:
4910 case ID_LED_OFF1_ON2:
4911 case ID_LED_OFF1_OFF2:
4912 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4913 mac->ledctl_mode1 |= (ledctl_off << shift);
4914 break;
4915 default:
4916 /* Do nothing */
4917 break;
4918 }
4919 switch (temp) {
4920 case ID_LED_DEF1_ON2:
4921 case ID_LED_ON1_ON2:
4922 case ID_LED_OFF1_ON2:
4923 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4924 mac->ledctl_mode2 |= (ledctl_on << shift);
4925 break;
4926 case ID_LED_DEF1_OFF2:
4927 case ID_LED_ON1_OFF2:
4928 case ID_LED_OFF1_OFF2:
4929 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4930 mac->ledctl_mode2 |= (ledctl_off << shift);
4931 break;
4932 default:
4933 /* Do nothing */
4934 break;
4935 }
4936 }
4937
4938 return E1000_SUCCESS;
4939 }
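
/* Hedged illustration of the decode above: the NVM ID-LED word packs one
 * 4-bit mode per LED, while each PHY LED0 configuration field is 5 bits
 * wide, hence the two different shifts:
 *
 *	temp  = (data >> (i * 4)) & E1000_LEDCTL_LED0_MODE_MASK;
 *	shift = i * 5;
 */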
4940
4941 /**
4942 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4943 * @hw: pointer to the HW structure
4944 *
4945 * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4946 * register, so the bus width is hard coded.
4947 **/
4948 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4949 {
4950 struct e1000_bus_info *bus = &hw->bus;
4951 s32 ret_val;
4952
4953 DEBUGFUNC("e1000_get_bus_info_ich8lan");
4954
4955 ret_val = e1000_get_bus_info_pcie_generic(hw);
4956
4957 /* ICH devices are "PCI Express"-ish. They have
4958 * a configuration space, but do not contain
4959 * PCI Express Capability registers, so bus width
4960 * must be hardcoded.
4961 */
4962 if (bus->width == e1000_bus_width_unknown)
4963 bus->width = e1000_bus_width_pcie_x1;
4964
4965 return ret_val;
4966 }
4967
4968 /**
4969 * e1000_reset_hw_ich8lan - Reset the hardware
4970 * @hw: pointer to the HW structure
4971 *
4972 * Does a full reset of the hardware which includes a reset of the PHY and
4973 * MAC.
4974 **/
4975 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4976 {
4977 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4978 u16 kum_cfg;
4979 u32 ctrl, reg;
4980 s32 ret_val;
4981 u16 pci_cfg;
4982
4983 DEBUGFUNC("e1000_reset_hw_ich8lan");
4984
4985 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4986 * on the last TLP read/write transaction when MAC is reset.
4987 */
4988 ret_val = e1000_disable_pcie_master_generic(hw);
4989 if (ret_val)
4990 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4991
4992 DEBUGOUT("Masking off all interrupts\n");
4993 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4994
4995 /* Disable the Transmit and Receive units. Then delay to allow
4996 * any pending transactions to complete before we hit the MAC
4997 * with the global reset.
4998 */
4999 E1000_WRITE_REG(hw, E1000_RCTL, 0);
5000 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
5001 E1000_WRITE_FLUSH(hw);
5002
5003 msec_delay(10);
5004
5005 /* Workaround for ICH8 bit corruption issue in FIFO memory */
5006 if (hw->mac.type == e1000_ich8lan) {
5007 /* Set Tx and Rx buffer allocation to 8k apiece. */
5008 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
5009 /* Set Packet Buffer Size to 16k. */
5010 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
5011 }
5012
5013 if (hw->mac.type == e1000_pchlan) {
5014 /* Save the NVM K1 bit setting*/
5015 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
5016 if (ret_val)
5017 return ret_val;
5018
5019 if (kum_cfg & E1000_NVM_K1_ENABLE)
5020 dev_spec->nvm_k1_enabled = true;
5021 else
5022 dev_spec->nvm_k1_enabled = false;
5023 }
5024
5025 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5026
5027 if (!hw->phy.ops.check_reset_block(hw)) {
5028 /* Full-chip reset requires MAC and PHY reset at the same
5029 * time to make sure the interface between MAC and the
5030 * external PHY is reset.
5031 */
5032 ctrl |= E1000_CTRL_PHY_RST;
5033
5034 /* Gate automatic PHY configuration by hardware on
5035 * non-managed 82579
5036 */
5037 if ((hw->mac.type == e1000_pch2lan) &&
5038 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5039 e1000_gate_hw_phy_config_ich8lan(hw, true);
5040 }
5041 ret_val = e1000_acquire_swflag_ich8lan(hw);
5042
5043 /* A read from EXTCNF_CTRL in e1000_acquire_swflag_ich8lan may
5044 * occur during a global reset and hang the system. A configuration
5045 * space access creates the needed delay: writing the
5046 * E1000_PCI_VENDOR_ID_REGISTER value to the read-only E1000_STRAP
5047 * register ensures the configuration space read completes before the global reset.
5048 */
5049 e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5050 E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5051 DEBUGOUT("Issuing a global reset to ich8lan\n");
5052 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5053 /* cannot issue a flush here because it hangs the hardware */
5054 msec_delay(20);
5055
5056 /* A configuration space access improves the HW-level time sync
5057 * mechanism. Writing the E1000_PCI_VENDOR_ID_REGISTER value to the
5058 * read-only E1000_STRAP register ensures the configuration space
5059 * read completes before any MAC register access.
5060 */
5061 e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5062 E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5063
5064 /* Set Phy Config Counter to 50msec */
5065 if (hw->mac.type == e1000_pch2lan) {
5066 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5067 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5068 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5069 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5070 }
5071
5072
5073 if (ctrl & E1000_CTRL_PHY_RST) {
5074 ret_val = hw->phy.ops.get_cfg_done(hw);
5075 if (ret_val)
5076 return ret_val;
5077
5078 ret_val = e1000_post_phy_reset_ich8lan(hw);
5079 if (ret_val)
5080 return ret_val;
5081 }
5082
5083 /* For PCH, this write will make sure that any noise
5084 * will be detected as a CRC error and be dropped rather than show up
5085 * as a bad packet to the DMA engine.
5086 */
5087 if (hw->mac.type == e1000_pchlan)
5088 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5089
5090 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5091 E1000_READ_REG(hw, E1000_ICR);
5092
5093 reg = E1000_READ_REG(hw, E1000_KABGTXD);
5094 reg |= E1000_KABGTXD_BGSQLBIAS;
5095 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5096
5097 return E1000_SUCCESS;
5098 }
5099
5100 /**
5101 * e1000_init_hw_ich8lan - Initialize the hardware
5102 * @hw: pointer to the HW structure
5103 *
5104 * Prepares the hardware for transmit and receive by doing the following:
5105 * - initialize hardware bits
5106 * - initialize LED identification
5107 * - setup receive address registers
5108 * - setup flow control
5109 * - setup transmit descriptors
5110 * - clear statistics
5111 **/
5112 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5113 {
5114 struct e1000_mac_info *mac = &hw->mac;
5115 u32 ctrl_ext, txdctl, snoop;
5116 s32 ret_val;
5117 u16 i;
5118
5119 DEBUGFUNC("e1000_init_hw_ich8lan");
5120
5121 e1000_initialize_hw_bits_ich8lan(hw);
5122
5123 /* Initialize identification LED */
5124 ret_val = mac->ops.id_led_init(hw);
5125 /* An error is not fatal and we should not stop init due to this */
5126 if (ret_val)
5127 DEBUGOUT("Error initializing identification LED\n");
5128
5129 /* Setup the receive address. */
5130 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5131
5132 /* Zero out the Multicast HASH table */
5133 DEBUGOUT("Zeroing the MTA\n");
5134 for (i = 0; i < mac->mta_reg_count; i++)
5135 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5136
5137 /* The 82578 Rx buffer will stall if wakeup is enabled in both the
5138 * host and the ME. Disable wakeup by clearing the host wakeup bit.
5139 * Reset the PHY after disabling host wakeup to reset the Rx buffer.
5140 */
5141 if (hw->phy.type == e1000_phy_82578) {
5142 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5143 i &= ~BM_WUC_HOST_WU_BIT;
5144 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5145 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5146 if (ret_val)
5147 return ret_val;
5148 }
5149
5150 /* Setup link and flow control */
5151 ret_val = mac->ops.setup_link(hw);
5152
5153 /* Set the transmit descriptor write-back policy for both queues */
5154 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5155 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5156 E1000_TXDCTL_FULL_TX_DESC_WB);
5157 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5158 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5159 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5160 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5161 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5162 E1000_TXDCTL_FULL_TX_DESC_WB);
5163 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5164 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5165 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5166
5167 /* ICH8 has opposite polarity of no_snoop bits.
5168 * By default, we should use snoop behavior.
5169 */
5170 if (mac->type == e1000_ich8lan)
5171 snoop = PCIE_ICH8_SNOOP_ALL;
5172 else
5173 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5174 e1000_set_pcie_no_snoop_generic(hw, snoop);
5175
5176 /* ungate DMA clock to avoid packet loss */
5177 if (mac->type >= e1000_pch_tgp) {
5178 uint32_t fflt_dbg = E1000_READ_REG(hw, E1000_FFLT_DBG);
5179 fflt_dbg |= (1 << 12);
5180 E1000_WRITE_REG(hw, E1000_FFLT_DBG, fflt_dbg);
5181 }
5182
5183 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5184 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5185 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5186
5187 /* Clear all of the statistics registers (clear on read). It is
5188 * important that we do this after we have tried to establish link
5189 * because the symbol error count will increment wildly if there
5190 * is no link.
5191 */
5192 e1000_clear_hw_cntrs_ich8lan(hw);
5193
5194 return ret_val;
5195 }
5196
5197 /**
5198 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5199 * @hw: pointer to the HW structure
5200 *
5201 * Sets/Clears required hardware bits necessary for correctly setting up the
5202 * hardware for transmit and receive.
5203 **/
5204 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5205 {
5206 u32 reg;
5207
5208 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5209
5210 /* Extended Device Control */
5211 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5212 reg |= (1 << 22);
5213 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5214 if (hw->mac.type >= e1000_pchlan)
5215 reg |= E1000_CTRL_EXT_PHYPDEN;
5216 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5217
5218 /* Transmit Descriptor Control 0 */
5219 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5220 reg |= (1 << 22);
5221 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5222
5223 /* Transmit Descriptor Control 1 */
5224 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5225 reg |= (1 << 22);
5226 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5227
5228 /* Transmit Arbitration Control 0 */
5229 reg = E1000_READ_REG(hw, E1000_TARC(0));
5230 if (hw->mac.type == e1000_ich8lan)
5231 reg |= (1 << 28) | (1 << 29);
5232 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5233 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5234
5235 /* Transmit Arbitration Control 1 */
5236 reg = E1000_READ_REG(hw, E1000_TARC(1));
5237 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5238 reg &= ~(1 << 28);
5239 else
5240 reg |= (1 << 28);
5241 reg |= (1 << 24) | (1 << 26) | (1 << 30);
5242 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5243
5244 /* Device Status */
5245 if (hw->mac.type == e1000_ich8lan) {
5246 reg = E1000_READ_REG(hw, E1000_STATUS);
5247 reg &= ~(1U << 31);
5248 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5249 }
5250
	/* Work around a descriptor data corruption issue observed with
	 * NFSv2 UDP traffic by simply disabling the NFS filtering
	 * capability.
	 */
	reg = E1000_READ_REG(hw, E1000_RFCTL);
	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);

	/* Disable IPv6 extension header parsing because some malformed
	 * IPv6 headers can hang the Rx.
	 */
	if (hw->mac.type == e1000_ich8lan)
		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
	E1000_WRITE_REG(hw, E1000_RFCTL, reg);

	/* Enable ECC on Lynxpoint */
	if (hw->mac.type >= e1000_pch_lpt) {
		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
		reg |= E1000_PBECCSTS_ECC_ENABLE;
		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);

		reg = E1000_READ_REG(hw, E1000_CTRL);
		reg |= E1000_CTRL_MEHE;
		E1000_WRITE_REG(hw, E1000_CTRL, reg);
	}

	return;
}

/**
 * e1000_setup_link_ich8lan - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control. Calls the appropriate media-specific link configuration
 * function. Assuming the adapter has a valid link partner, a valid link
 * should be established. Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 **/
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_setup_link_ich8lan");

	/* ICH parts do not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	if (hw->fc.requested_mode == e1000_fc_default)
		hw->fc.requested_mode = e1000_fc_full;

	/* Save off the requested flow control mode for use later. Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
		  hw->fc.current_mode);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Continue to configure the copper link. */
		ret_val = hw->mac.ops.setup_physical_interface(hw);
		if (ret_val)
			return ret_val;
	}

	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);

		ret_val = hw->phy.ops.write_reg(hw,
						PHY_REG(BM_PORT_CTRL_PAGE, 27),
						hw->fc.pause_time);
		if (ret_val)
			return ret_val;
	}

	return e1000_set_fc_watermarks_generic(hw);
}
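
/* Illustrative sketch (an assumption, not driver code): a caller that wants
 * Rx-pause-only flow control instead of the ICH default of full would set
 * the requested mode before invoking link setup, e.g.:
 *
 *	hw->fc.requested_mode = e1000_fc_rx_pause;
 *	ret_val = hw->mac.ops.setup_link(hw);
 */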

/**
 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 * @hw: pointer to the HW structure
 *
 * Configures the Kumeran interface to the PHY to wait the appropriate time
 * when polling the PHY, then calls the generic setup_copper_link to finish
 * configuring the copper link.
 **/
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}
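
/* For reference (a sketch based on the mdix handling above): hw->phy.mdix
 * uses 0 for auto-crossover, 1 to force MDI and 2 to force MDI-X, so a
 * hypothetical caller forcing MDI-X on an IFE PHY before link setup would
 * do something like:
 *
 *	hw->phy.mdix = 2;
 *	ret_val = hw->mac.ops.setup_physical_interface(hw);
 */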

/**
 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
 * @hw: pointer to the HW structure
 *
 * Calls the PHY specific link setup function and then calls the
 * generic setup_copper_link to finish configuring the link for
 * Lynxpoint PCH devices
 **/
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	ret_val = e1000_copper_link_setup_82577(hw);
	if (ret_val)
		return ret_val;

	return e1000_setup_copper_link_generic(hw);
}

/**
 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 * @hw: pointer to the HW structure
 * @speed: pointer to store current link speed
 * @duplex: pointer to store the current link duplex
 *
 * Calls the generic get_speed_and_duplex to retrieve the current link
 * information and then calls the Kumeran lock loss workaround for links at
 * gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_ich8lan");

	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
	if (ret_val)
		return ret_val;

	if ((hw->mac.type == e1000_ich8lan) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (*speed == SPEED_1000)) {
		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
	}

	return ret_val;
}
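
/* Illustrative usage (a sketch): reading the current speed/duplex through
 * the generic ops table, which on ICH8/IGP3 parts also triggers the Kumeran
 * lock loss workaround at 1Gb/s:
 *
 *	u16 speed, duplex;
 *	ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 */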

/**
 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 * @hw: pointer to the HW structure
 *
 * Work-around for 82566 Kumeran PCS lock loss:
 * On link status change (i.e. PCI reset, speed change) when link is up at
 * gigabit speed:
 *  0) if the workaround has been disabled, do nothing
 *  1) wait 1ms for the Kumeran link to come up
 *  2) check the Kumeran Diagnostic register PCS lock loss bit
 *  3) if not set the link is locked (all is good), otherwise...
 *  4) reset the PHY
 *  5) repeat up to 10 times
 * Note: this is only called for IGP3 copper when speed is 1Gb/s.
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return E1000_SUCCESS;

	/* Make sure link is up before proceeding. If not, just return.
	 * Attempting this while the link is negotiating fouls up link
	 * stability.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return E1000_SUCCESS;

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return E1000_SUCCESS;

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* Disable GigE link negotiation */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/* Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}

/**
 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 * @hw: pointer to the HW structure
 * @state: boolean value used to set the current Kumeran workaround state
 *
 * If ICH8, set the current Kumeran workaround state (enabled - true
 * /disabled - false).
 **/
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						 bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");

	if (hw->mac.type != e1000_ich8lan) {
		DEBUGOUT("Workaround applies to ICH8 only.\n");
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;

	return;
}
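
/* Example (illustrative only): a diagnostic path might disable the Kumeran
 * lock loss workaround on an ICH8 part before a link stress test and
 * re-enable it afterwards:
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, false);
 *	... run the test ...
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 */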

/**
 * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 * @hw: pointer to the HW structure
 *
 * Workaround for 82566 power-down on D3 entry:
 * 1) disable gigabit link
 * 2) write VR power-down enable
 * 3) read it back
 * Continue if successful, else issue LCD reset and repeat
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
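
/* Illustrative call site (an assumption, not taken from this driver): a
 * shutdown or D3-entry path for an ICH8 part would typically run this after
 * wake-up has been disabled; the function itself bails out unless the PHY
 * is an IGP3:
 *
 *	e1000_igp3_phy_powerdown_workaround_ich8lan(hw);
 */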

/**
 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 * @hw: pointer to the HW structure
 *
 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
 * LPLU, Gig disable, MDIC PHY reset):
 * 1) Set Kumeran Near-end loopback
 * 2) Clear Kumeran Near-end loopback
 * Should only be called for ICH8[m] devices with any 1G PHY.
 **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data = 0;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type == e1000_phy_ife))
		return;

	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		return;
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				     reg_data);
}

/**
 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 * @hw: pointer to the HW structure
 *
 * During S0 to Sx transition, it is possible the link remains at gig
 * instead of negotiating to a lower speed. Before going to Sx, set
 * 'Gig Disable' to force link speed negotiation to a lower speed based on
 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 * needs to be written.
 * Parts that support (and are linked to a partner which supports) EEE in
 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 * than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
		    (hw->mac.type >= e1000_pch_spt)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good. LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, false);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}
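
/* Hypothetical suspend-path sketch (not part of this file): a driver's
 * S0->Sx handler would normally program its wake-up filters first and then
 * run these workarounds immediately before entering D3:
 *
 *	e1000_suspend_workarounds_ich8lan(hw);
 */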

/**
 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 * @hw: pointer to the HW structure
 *
 * During Sx to S0 transitions on non-managed devices or managed devices
 * on which PHY resets are not blocked, if the PHY registers cannot be
 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
 * the PHY.
 * On i217, setup Intel Rapid Start Technology.
 **/
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return ret_val;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return ret_val;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
		return ret_val;
	}
	return E1000_SUCCESS;
}
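
/* Matching resume sketch (illustrative): on Sx->S0 the driver restores PHY
 * access before touching any other PHY register and checks the result:
 *
 *	ret_val = e1000_resume_workarounds_pchlan(hw);
 *	if (ret_val)
 *		DEBUGOUT1("Resume workarounds failed, ret_val=%d\n", ret_val);
 */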

/**
 * e1000_cleanup_led_ich8lan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
					     0);

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
}

/**
 * e1000_led_on_ich8lan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_on_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
	return E1000_SUCCESS;
}

/**
 * e1000_led_off_ich8lan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_off_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
	return E1000_SUCCESS;
}

/**
 * e1000_setup_led_pchlan - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_setup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_mode1);
}

/**
 * e1000_cleanup_led_pchlan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_default);
}

/**
 * e1000_led_on_pchlan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	DEBUGFUNC("e1000_led_on_pchlan");

	/* If no link, then turn LED on by setting the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode2.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}
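
/* Layout note with a worked example (derived from the loop above): each of
 * the three LEDs occupies a 5-bit field in HV_LED_CONFIG, so LED1's field
 * is (data >> 5) & E1000_PHY_LED0_MASK and its invert bit is
 * E1000_PHY_LED0_IVRT << 5; toggling that bit flips LED1's polarity.
 */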

/**
 * e1000_led_off_pchlan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	DEBUGFUNC("e1000_led_off_pchlan");

	/* If no link, then turn LED off by clearing the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode1.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 * @hw: pointer to the HW structure
 *
 * Read appropriate register for the config done bit for completion status
 * and configure the PHY through s/w for EEPROM-less parts.
 *
 * NOTE: some silicon which is EEPROM-less will fail trying to read the
 * config done bit, so only an error is logged and execution continues. If
 * we were to return with an error, EEPROM-less silicon would not be able
 * to be reset or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down, to save power or to turn off link during
 * a driver unload, or when wake on LAN is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}

/**
 * e1000_configure_k0s_lpt - Configure K0s power state
 * @hw: pointer to the HW structure
 * @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
 *	0 corresponds to 128ns, each value over 0 doubles the duration.
 * @min_time: Minimum Tx idle period allowed - valid values are 0 to 4.
 *	0 corresponds to 128ns, each value over 0 doubles the duration.
 *
 * Configure the K0s power state based on the provided parameters.
 * Assumes the semaphore has already been acquired.
 *
 * Success returns 0, Failure returns:
 *	-E1000_ERR_PHY (-2) in case of access error
 *	-E1000_ERR_PARAM (-4) in case of parameter error
 **/
s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
{
	s32 ret_val;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k0s_lpt");

	if (entry_latency > 3 || min_time > 4)
		return -E1000_ERR_PARAM;

	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
					     &kmrn_reg);
	if (ret_val)
		return ret_val;

	/* for now don't touch the latency */
	kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
	kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));

	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	return E1000_SUCCESS;
}

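/* Worked example (from the 128ns doubling rule above): min_time = 3 maps to
 * 128ns << 3 = 1024ns. A hypothetical caller that already holds the KMRN
 * semaphore and wants that minimum Tx idle period (note the entry latency
 * is currently left untouched by the function) would use:
 *
 *	ret_val = e1000_configure_k0s_lpt(hw, 0, 3);
 */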