1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 /*
36 * 82575EB Gigabit Network Connection
37 * 82575EB Gigabit Backplane Connection
38 * 82575GB Gigabit Network Connection
39 * 82576 Gigabit Network Connection
40 * 82576 Quad Port Gigabit Mezzanine Adapter
41 * 82580 Gigabit Network Connection
42 * I350 Gigabit Network Connection
43 */
44
45 #include "e1000_api.h"
46 #include "e1000_i210.h"
47
48 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
49 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
50 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
51 static void e1000_release_phy_82575(struct e1000_hw *hw);
52 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
53 static void e1000_release_nvm_82575(struct e1000_hw *hw);
54 static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
55 static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
56 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
57 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
58 u16 *duplex);
59 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
60 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
61 u16 *data);
62 static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
63 static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
64 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
65 u32 offset, u16 *data);
66 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
67 u32 offset, u16 data);
68 static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
69 bool active);
70 static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
71 bool active);
72 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
73 bool active);
74 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
75 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
76 static s32 e1000_get_media_type_82575(struct e1000_hw *hw);
77 static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
78 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
79 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
80 u32 offset, u16 data);
81 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
82 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
83 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
84 u16 *speed, u16 *duplex);
85 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
86 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
87 static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
88 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
89 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
90 static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
91 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
92 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
93 static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
94 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
95 static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
96 static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
97 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
98 static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
99 u16 offset);
100 static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
101 u16 offset);
102 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
103 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
104 static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
105 static void e1000_clear_vfta_i350(struct e1000_hw *hw);
106
107 static void e1000_i2c_start(struct e1000_hw *hw);
108 static void e1000_i2c_stop(struct e1000_hw *hw);
109 static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
110 static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
111 static s32 e1000_get_i2c_ack(struct e1000_hw *hw);
112 static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
113 static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
114 static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
115 static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
116 static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
117 static bool e1000_get_i2c_data(u32 *i2cctl);
118
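/*
 * Note: the table below appears to list Rx packet buffer sizes, in KB,
 * indexed by the RXPBS size-field encoding on 82580-class devices.
 */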
119 static const u16 e1000_82580_rxpbs_table[] = {
120 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
121 #define E1000_82580_RXPBS_TABLE_SIZE \
122 (sizeof(e1000_82580_rxpbs_table) / \
123 sizeof(e1000_82580_rxpbs_table[0]))
124
125
126 /**
127 * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
128 * @hw: pointer to the HW structure
129 *
130 * Called to determine if the I2C pins are being used for I2C or as an
131 * external MDIO interface since the two options are mutually exclusive.
132 **/
133 static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
134 {
135 u32 reg = 0;
136 bool ext_mdio = FALSE;
137
138 DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
139
140 switch (hw->mac.type) {
141 case e1000_82575:
142 case e1000_82576:
143 reg = E1000_READ_REG(hw, E1000_MDIC);
144 ext_mdio = !!(reg & E1000_MDIC_DEST);
145 break;
146 case e1000_82580:
147 case e1000_i350:
148 case e1000_i354:
149 case e1000_i210:
150 case e1000_i211:
151 reg = E1000_READ_REG(hw, E1000_MDICNFG);
152 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
153 break;
154 default:
155 break;
156 }
157 return ext_mdio;
158 }
159
160 /**
161 * e1000_init_phy_params_82575 - Init PHY func ptrs.
162 * @hw: pointer to the HW structure
163 **/
164 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
165 {
166 struct e1000_phy_info *phy = &hw->phy;
167 s32 ret_val = E1000_SUCCESS;
168 u32 ctrl_ext;
169
170 DEBUGFUNC("e1000_init_phy_params_82575");
171
172 phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
173 phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
174
175 if (hw->phy.media_type != e1000_media_type_copper) {
176 phy->type = e1000_phy_none;
177 goto out;
178 }
179
180 phy->ops.power_up = e1000_power_up_phy_copper;
181 phy->ops.power_down = e1000_power_down_phy_copper_82575;
182
183 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
184 phy->reset_delay_us = 100;
185
186 phy->ops.acquire = e1000_acquire_phy_82575;
187 phy->ops.check_reset_block = e1000_check_reset_block_generic;
188 phy->ops.commit = e1000_phy_sw_reset_generic;
189 phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
190 phy->ops.release = e1000_release_phy_82575;
191
192 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
193
194 if (e1000_sgmii_active_82575(hw)) {
195 phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
196 ctrl_ext |= E1000_CTRL_I2C_ENA;
197 } else {
198 phy->ops.reset = e1000_phy_hw_reset_generic;
199 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
200 }
201
202 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
203 e1000_reset_mdicnfg_82580(hw);
204
205 if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
206 phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
207 phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
208 } else {
209 switch (hw->mac.type) {
210 case e1000_82580:
211 case e1000_i350:
212 case e1000_i354:
213 phy->ops.read_reg = e1000_read_phy_reg_82580;
214 phy->ops.write_reg = e1000_write_phy_reg_82580;
215 break;
216 case e1000_i210:
217 case e1000_i211:
218 phy->ops.read_reg = e1000_read_phy_reg_gs40g;
219 phy->ops.write_reg = e1000_write_phy_reg_gs40g;
220 break;
221 default:
222 phy->ops.read_reg = e1000_read_phy_reg_igp;
223 phy->ops.write_reg = e1000_write_phy_reg_igp;
224 }
225 }
226
227 /* Set phy->phy_addr and phy->id. */
228 ret_val = e1000_get_phy_id_82575(hw);
229
230 /* Verify phy id and set remaining function pointers */
231 switch (phy->id) {
232 case M88E1543_E_PHY_ID:
233 case M88E1512_E_PHY_ID:
234 case I347AT4_E_PHY_ID:
235 case M88E1112_E_PHY_ID:
236 case M88E1340M_E_PHY_ID:
237 case M88E1111_I_PHY_ID:
238 phy->type = e1000_phy_m88;
239 phy->ops.check_polarity = e1000_check_polarity_m88;
240 phy->ops.get_info = e1000_get_phy_info_m88;
241 if (phy->id == I347AT4_E_PHY_ID ||
242 phy->id == M88E1112_E_PHY_ID ||
243 phy->id == M88E1340M_E_PHY_ID)
244 phy->ops.get_cable_length =
245 e1000_get_cable_length_m88_gen2;
246 else if (phy->id == M88E1543_E_PHY_ID ||
247 phy->id == M88E1512_E_PHY_ID)
248 phy->ops.get_cable_length =
249 e1000_get_cable_length_m88_gen2;
250 else
251 phy->ops.get_cable_length = e1000_get_cable_length_m88;
252 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
253 /* Check if this PHY is configured for media swap. */
254 if (phy->id == M88E1112_E_PHY_ID) {
255 u16 data;
256
257 ret_val = phy->ops.write_reg(hw,
258 E1000_M88E1112_PAGE_ADDR,
259 2);
260 if (ret_val)
261 goto out;
262
263 ret_val = phy->ops.read_reg(hw,
264 E1000_M88E1112_MAC_CTRL_1,
265 &data);
266 if (ret_val)
267 goto out;
268
269 data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
270 E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
271 if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
272 data == E1000_M88E1112_AUTO_COPPER_BASEX)
273 hw->mac.ops.check_for_link =
274 e1000_check_for_link_media_swap;
275 }
276 if (phy->id == M88E1512_E_PHY_ID) {
277 ret_val = e1000_initialize_M88E1512_phy(hw);
278 if (ret_val)
279 goto out;
280 }
281 if (phy->id == M88E1543_E_PHY_ID) {
282 ret_val = e1000_initialize_M88E1543_phy(hw);
283 if (ret_val)
284 goto out;
285 }
286 break;
287 case IGP03E1000_E_PHY_ID:
288 case IGP04E1000_E_PHY_ID:
289 phy->type = e1000_phy_igp_3;
290 phy->ops.check_polarity = e1000_check_polarity_igp;
291 phy->ops.get_info = e1000_get_phy_info_igp;
292 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
293 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
294 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
295 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
296 break;
297 case I82580_I_PHY_ID:
298 case I350_I_PHY_ID:
299 phy->type = e1000_phy_82580;
300 phy->ops.check_polarity = e1000_check_polarity_82577;
301 phy->ops.force_speed_duplex =
302 e1000_phy_force_speed_duplex_82577;
303 phy->ops.get_cable_length = e1000_get_cable_length_82577;
304 phy->ops.get_info = e1000_get_phy_info_82577;
305 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
306 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
307 break;
308 case I210_I_PHY_ID:
309 phy->type = e1000_phy_i210;
310 phy->ops.check_polarity = e1000_check_polarity_m88;
311 phy->ops.get_info = e1000_get_phy_info_m88;
312 phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
313 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
314 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
315 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
316 break;
317 default:
318 ret_val = -E1000_ERR_PHY;
319 goto out;
320 }
321
322 out:
323 return ret_val;
324 }
325
326 /**
327 * e1000_init_nvm_params_82575 - Init NVM func ptrs.
328 * @hw: pointer to the HW structure
329 **/
330 s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
331 {
332 struct e1000_nvm_info *nvm = &hw->nvm;
333 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
334 u16 size;
335
336 DEBUGFUNC("e1000_init_nvm_params_82575");
337
338 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
339 E1000_EECD_SIZE_EX_SHIFT);
340 /*
341 * Added to a constant, "size" becomes the left-shift value
342 * for setting word_size.
343 */
344 size += NVM_WORD_SIZE_BASE_SHIFT;
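/*
 * For example, assuming NVM_WORD_SIZE_BASE_SHIFT is 6: an EECD size
 * field of 2 yields size = 8, so word_size = 1 << 8 = 256 words.
 */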
345
346 /* Just in case size is out of range, cap it to the largest
347 * EEPROM size supported
348 */
349 if (size > 15)
350 size = 15;
351
352 nvm->word_size = 1 << size;
353 if (hw->mac.type < e1000_i210) {
354 nvm->opcode_bits = 8;
355 nvm->delay_usec = 1;
356
357 switch (nvm->override) {
358 case e1000_nvm_override_spi_large:
359 nvm->page_size = 32;
360 nvm->address_bits = 16;
361 break;
362 case e1000_nvm_override_spi_small:
363 nvm->page_size = 8;
364 nvm->address_bits = 8;
365 break;
366 default:
367 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
368 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
369 16 : 8;
370 break;
371 }
372 if (nvm->word_size == (1 << 15))
373 nvm->page_size = 128;
374
375 nvm->type = e1000_nvm_eeprom_spi;
376 } else {
377 nvm->type = e1000_nvm_flash_hw;
378 }
379
380 /* Function Pointers */
381 nvm->ops.acquire = e1000_acquire_nvm_82575;
382 nvm->ops.release = e1000_release_nvm_82575;
383 if (nvm->word_size < (1 << 15))
384 nvm->ops.read = e1000_read_nvm_eerd;
385 else
386 nvm->ops.read = e1000_read_nvm_spi;
387
388 nvm->ops.write = e1000_write_nvm_spi;
389 nvm->ops.validate = e1000_validate_nvm_checksum_generic;
390 nvm->ops.update = e1000_update_nvm_checksum_generic;
391 nvm->ops.valid_led_default = e1000_valid_led_default_82575;
392
393 /* override generic family function pointers for specific descendants */
394 switch (hw->mac.type) {
395 case e1000_82580:
396 nvm->ops.validate = e1000_validate_nvm_checksum_82580;
397 nvm->ops.update = e1000_update_nvm_checksum_82580;
398 break;
399 case e1000_i350:
400 case e1000_i354:
401 nvm->ops.validate = e1000_validate_nvm_checksum_i350;
402 nvm->ops.update = e1000_update_nvm_checksum_i350;
403 break;
404 default:
405 break;
406 }
407
408 return E1000_SUCCESS;
409 }
410
411 /**
412 * e1000_init_mac_params_82575 - Init MAC func ptrs.
413 * @hw: pointer to the HW structure
414 **/
415 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
416 {
417 struct e1000_mac_info *mac = &hw->mac;
418 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
419
420 DEBUGFUNC("e1000_init_mac_params_82575");
421
422 /* Derives media type */
423 e1000_get_media_type_82575(hw);
424 /* Set mta register count */
425 mac->mta_reg_count = 128;
426 /* Set uta register count */
427 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
428 /* Set rar entry count */
429 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
430 if (mac->type == e1000_82576)
431 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
432 if (mac->type == e1000_82580)
433 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
434 if (mac->type == e1000_i350 || mac->type == e1000_i354)
435 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
436
437 /* Disable EEE default settings for EEE supported devices */
438 if (mac->type >= e1000_i350)
439 dev_spec->eee_disable = TRUE;
440
441 /* Allow a single clear of the SW semaphore on I210 and newer */
442 if (mac->type >= e1000_i210)
443 dev_spec->clear_semaphore_once = TRUE;
444
445 /* Set if part includes ASF firmware */
446 mac->asf_firmware_present = TRUE;
447 /* FWSM register */
448 mac->has_fwsm = TRUE;
449 /* ARC supported; valid only if manageability features are enabled. */
450 mac->arc_subsystem_valid =
451 !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK);
452
453 /* Function pointers */
454
455 /* bus type/speed/width */
456 mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
457 /* reset */
458 if (mac->type >= e1000_82580)
459 mac->ops.reset_hw = e1000_reset_hw_82580;
460 else
461 mac->ops.reset_hw = e1000_reset_hw_82575;
462 /* hw initialization */
463 if ((mac->type == e1000_i210) || (mac->type == e1000_i211))
464 mac->ops.init_hw = e1000_init_hw_i210;
465 else
466 mac->ops.init_hw = e1000_init_hw_82575;
467 /* link setup */
468 mac->ops.setup_link = e1000_setup_link_generic;
469 /* physical interface link setup */
470 mac->ops.setup_physical_interface =
471 (hw->phy.media_type == e1000_media_type_copper)
472 ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575;
473 /* physical interface shutdown */
474 mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
475 /* physical interface power up */
476 mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
477 /* check for link */
478 mac->ops.check_for_link = e1000_check_for_link_82575;
479 /* read mac address */
480 mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
481 /* configure collision distance */
482 mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
483 /* multicast address update */
484 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
485 if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) {
486 /* writing VFTA */
487 mac->ops.write_vfta = e1000_write_vfta_i350;
488 /* clearing VFTA */
489 mac->ops.clear_vfta = e1000_clear_vfta_i350;
490 } else {
491 /* writing VFTA */
492 mac->ops.write_vfta = e1000_write_vfta_generic;
493 /* clearing VFTA */
494 mac->ops.clear_vfta = e1000_clear_vfta_generic;
495 }
496 if (hw->mac.type >= e1000_82580)
497 mac->ops.validate_mdi_setting =
498 e1000_validate_mdi_setting_crossover_generic;
499 /* ID LED init */
500 mac->ops.id_led_init = e1000_id_led_init_generic;
501 /* blink LED */
502 mac->ops.blink_led = e1000_blink_led_generic;
503 /* setup LED */
504 mac->ops.setup_led = e1000_setup_led_generic;
505 /* cleanup LED */
506 mac->ops.cleanup_led = e1000_cleanup_led_generic;
507 /* turn on/off LED */
508 mac->ops.led_on = e1000_led_on_generic;
509 mac->ops.led_off = e1000_led_off_generic;
510 /* clear hardware counters */
511 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
512 /* link info */
513 mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
514 /* acquire SW_FW sync */
515 mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
516 mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
517 if (mac->type >= e1000_i210) {
518 mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
519 mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
520 }
521
522 /* set lan id for port to determine which phy lock to use */
523 hw->mac.ops.set_lan_id(hw);
524
525 return E1000_SUCCESS;
526 }
527
528 /**
529 * e1000_init_function_pointers_82575 - Init func ptrs.
530 * @hw: pointer to the HW structure
531 *
532 * Called to initialize all function pointers and parameters.
533 **/
534 void e1000_init_function_pointers_82575(struct e1000_hw *hw)
535 {
536 DEBUGFUNC("e1000_init_function_pointers_82575");
537
538 hw->mac.ops.init_params = e1000_init_mac_params_82575;
539 hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
540 hw->phy.ops.init_params = e1000_init_phy_params_82575;
541 hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
542 }
543
544 /**
545 * e1000_acquire_phy_82575 - Acquire rights to access PHY
546 * @hw: pointer to the HW structure
547 *
548 * Acquire access rights to the correct PHY.
549 **/
550 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
551 {
552 u16 mask = E1000_SWFW_PHY0_SM;
553
554 DEBUGFUNC("e1000_acquire_phy_82575");
555
556 if (hw->bus.func == E1000_FUNC_1)
557 mask = E1000_SWFW_PHY1_SM;
558 else if (hw->bus.func == E1000_FUNC_2)
559 mask = E1000_SWFW_PHY2_SM;
560 else if (hw->bus.func == E1000_FUNC_3)
561 mask = E1000_SWFW_PHY3_SM;
562
563 return hw->mac.ops.acquire_swfw_sync(hw, mask);
564 }
565
566 /**
567 * e1000_release_phy_82575 - Release rights to access PHY
568 * @hw: pointer to the HW structure
569 *
570 * A wrapper to release access rights to the correct PHY.
571 **/
572 static void e1000_release_phy_82575(struct e1000_hw *hw)
573 {
574 u16 mask = E1000_SWFW_PHY0_SM;
575
576 DEBUGFUNC("e1000_release_phy_82575");
577
578 if (hw->bus.func == E1000_FUNC_1)
579 mask = E1000_SWFW_PHY1_SM;
580 else if (hw->bus.func == E1000_FUNC_2)
581 mask = E1000_SWFW_PHY2_SM;
582 else if (hw->bus.func == E1000_FUNC_3)
583 mask = E1000_SWFW_PHY3_SM;
584
585 hw->mac.ops.release_swfw_sync(hw, mask);
586 }
587
588 /**
589 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
590 * @hw: pointer to the HW structure
591 * @offset: register offset to be read
592 * @data: pointer to the read data
593 *
594 * Reads the PHY register at offset using the serial gigabit media independent
595 * interface and stores the retrieved information in data.
596 **/
597 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
598 u16 *data)
599 {
600 s32 ret_val = -E1000_ERR_PARAM;
601
602 DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
603
604 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
605 DEBUGOUT1("PHY Address %u is out of range\n", offset);
606 goto out;
607 }
608
609 ret_val = hw->phy.ops.acquire(hw);
610 if (ret_val)
611 goto out;
612
613 ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
614
615 hw->phy.ops.release(hw);
616
617 out:
618 return ret_val;
619 }
620
621 /**
622 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
623 * @hw: pointer to the HW structure
624 * @offset: register offset to write to
625 * @data: data to write at register offset
626 *
627 * Writes the data to PHY register at the offset using the serial gigabit
628 * media independent interface.
629 **/
630 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
631 u16 data)
632 {
633 s32 ret_val = -E1000_ERR_PARAM;
634
635 DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
636
637 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
638 DEBUGOUT1("PHY Address %d is out of range\n", offset);
639 goto out;
640 }
641
642 ret_val = hw->phy.ops.acquire(hw);
643 if (ret_val)
644 goto out;
645
646 ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
647
648 hw->phy.ops.release(hw);
649
650 out:
651 return ret_val;
652 }
653
654 /**
655 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
656 * @hw: pointer to the HW structure
657 *
658 * Retrieves the PHY address and ID for PHYs that do and do not use the
659 * SGMII interface.
660 **/
661 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
662 {
663 struct e1000_phy_info *phy = &hw->phy;
664 s32 ret_val = E1000_SUCCESS;
665 u16 phy_id;
666 u32 ctrl_ext;
667 u32 mdic;
668
669 DEBUGFUNC("e1000_get_phy_id_82575");
670
671 /* some i354 devices need an extra read for phy id */
672 if (hw->mac.type == e1000_i354)
673 e1000_get_phy_id(hw);
674
675 /*
676 * For SGMII PHYs, we try the list of possible addresses until
677 * we find one that works. For non-SGMII PHYs
678 * (e.g. integrated copper PHYs), an address of 1 should
679 * work. The result of this function should mean phy->phy_addr
680 * and phy->id are set correctly.
681 */
682 if (!e1000_sgmii_active_82575(hw)) {
683 phy->addr = 1;
684 ret_val = e1000_get_phy_id(hw);
685 goto out;
686 }
687
688 if (e1000_sgmii_uses_mdio_82575(hw)) {
689 switch (hw->mac.type) {
690 case e1000_82575:
691 case e1000_82576:
692 mdic = E1000_READ_REG(hw, E1000_MDIC);
693 mdic &= E1000_MDIC_PHY_MASK;
694 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
695 break;
696 case e1000_82580:
697 case e1000_i350:
698 case e1000_i354:
699 case e1000_i210:
700 case e1000_i211:
701 mdic = E1000_READ_REG(hw, E1000_MDICNFG);
702 mdic &= E1000_MDICNFG_PHY_MASK;
703 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
704 break;
705 default:
706 ret_val = -E1000_ERR_PHY;
707 goto out;
708 break;
709 }
710 ret_val = e1000_get_phy_id(hw);
711 goto out;
712 }
713
714 /* Power on sgmii phy if it is disabled */
715 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
716 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
717 ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
718 E1000_WRITE_FLUSH(hw);
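/* Allow ~300 ms for the SFP cage / attached PHY to power up before probing. */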
719 msec_delay(300);
720
721 /*
722 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
723 * Therefore, we need to test 1-7
724 */
725 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
726 ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
727 if (ret_val == E1000_SUCCESS) {
728 DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
729 phy_id, phy->addr);
730 /*
731 * At the time of this writing, the M88 part is
732 * the only supported SGMII PHY product.
733 */
734 if (phy_id == M88_VENDOR)
735 break;
736 } else {
737 DEBUGOUT1("PHY address %u was unreadable\n",
738 phy->addr);
739 }
740 }
741
742 /* A valid PHY type couldn't be found. */
743 if (phy->addr == 8) {
744 phy->addr = 0;
745 ret_val = -E1000_ERR_PHY;
746 } else {
747 ret_val = e1000_get_phy_id(hw);
748 }
749
750 /* restore previous sfp cage power state */
751 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
752
753 out:
754 return ret_val;
755 }
756
757 /**
758 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
759 * @hw: pointer to the HW structure
760 *
761 * Resets the PHY using the serial gigabit media independent interface.
762 **/
763 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
764 {
765 s32 ret_val = E1000_SUCCESS;
766 struct e1000_phy_info *phy = &hw->phy;
767
768 DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
769
770 /*
771 * This isn't a TRUE "hard" reset, but is the only reset
772 * available to us at this time.
773 */
774
775 DEBUGOUT("Soft resetting SGMII attached PHY...\n");
776
777 if (!(hw->phy.ops.write_reg))
778 goto out;
779
780 /*
781 * SFP documentation requires the following to configure the SFP module
782 * to work on SGMII. No further documentation is given.
783 */
784 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
785 if (ret_val)
786 goto out;
787
788 ret_val = hw->phy.ops.commit(hw);
789 if (ret_val)
790 goto out;
791
792 if (phy->id == M88E1512_E_PHY_ID)
793 ret_val = e1000_initialize_M88E1512_phy(hw);
794 out:
795 return ret_val;
796 }
797
798 /**
799 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
800 * @hw: pointer to the HW structure
801 * @active: TRUE to enable LPLU, FALSE to disable
802 *
803 * Sets the LPLU D0 state according to the active flag. When
804 * activating LPLU this function also disables smart speed
805 * and vice versa. LPLU will not be activated unless the
806 * device autonegotiation advertisement meets standards of
807 * either 10 or 10/100 or 10/100/1000 at all duplexes.
808 * This is a function pointer entry point only called by
809 * PHY setup routines.
810 **/
811 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
812 {
813 struct e1000_phy_info *phy = &hw->phy;
814 s32 ret_val = E1000_SUCCESS;
815 u16 data;
816
817 DEBUGFUNC("e1000_set_d0_lplu_state_82575");
818
819 if (!(hw->phy.ops.read_reg))
820 goto out;
821
822 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
823 if (ret_val)
824 goto out;
825
826 if (active) {
827 data |= IGP02E1000_PM_D0_LPLU;
828 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
829 data);
830 if (ret_val)
831 goto out;
832
833 /* When LPLU is enabled, we should disable SmartSpeed */
834 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
835 &data);
836 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
837 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
838 data);
839 if (ret_val)
840 goto out;
841 } else {
842 data &= ~IGP02E1000_PM_D0_LPLU;
843 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
844 data);
845 /*
846 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
847 * during Dx states where the power conservation is most
848 * important. During driver activity we should enable
849 * SmartSpeed, so performance is maintained.
850 */
851 if (phy->smart_speed == e1000_smart_speed_on) {
852 ret_val = phy->ops.read_reg(hw,
853 IGP01E1000_PHY_PORT_CONFIG,
854 &data);
855 if (ret_val)
856 goto out;
857
858 data |= IGP01E1000_PSCFR_SMART_SPEED;
859 ret_val = phy->ops.write_reg(hw,
860 IGP01E1000_PHY_PORT_CONFIG,
861 data);
862 if (ret_val)
863 goto out;
864 } else if (phy->smart_speed == e1000_smart_speed_off) {
865 ret_val = phy->ops.read_reg(hw,
866 IGP01E1000_PHY_PORT_CONFIG,
867 &data);
868 if (ret_val)
869 goto out;
870
871 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
872 ret_val = phy->ops.write_reg(hw,
873 IGP01E1000_PHY_PORT_CONFIG,
874 data);
875 if (ret_val)
876 goto out;
877 }
878 }
879
880 out:
881 return ret_val;
882 }
883
884 /**
885 * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
886 * @hw: pointer to the HW structure
887 * @active: TRUE to enable LPLU, FALSE to disable
888 *
889 * Sets the LPLU D0 state according to the active flag. When
890 * activating LPLU this function also disables smart speed
891 * and vice versa. LPLU will not be activated unless the
892 * device autonegotiation advertisement meets standards of
893 * either 10 or 10/100 or 10/100/1000 at all duplexes.
894 * This is a function pointer entry point only called by
895 * PHY setup routines.
896 **/
897 static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
898 {
899 struct e1000_phy_info *phy = &hw->phy;
900 u32 data;
901
902 DEBUGFUNC("e1000_set_d0_lplu_state_82580");
903
904 data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
905
906 if (active) {
907 data |= E1000_82580_PM_D0_LPLU;
908
909 /* When LPLU is enabled, we should disable SmartSpeed */
910 data &= ~E1000_82580_PM_SPD;
911 } else {
912 data &= ~E1000_82580_PM_D0_LPLU;
913
914 /*
915 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
916 * during Dx states where the power conservation is most
917 * important. During driver activity we should enable
918 * SmartSpeed, so performance is maintained.
919 */
920 if (phy->smart_speed == e1000_smart_speed_on)
921 data |= E1000_82580_PM_SPD;
922 else if (phy->smart_speed == e1000_smart_speed_off)
923 data &= ~E1000_82580_PM_SPD;
924 }
925
926 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
927 return E1000_SUCCESS;
928 }
929
930 /**
931 * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
932 * @hw: pointer to the HW structure
933 * @active: boolean used to enable/disable lplu
934 *
935 * Success returns 0, Failure returns 1
936 *
937 * The low power link up (lplu) state is set to the power management level D3
938 * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
939 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
940 * is used during Dx states where the power conservation is most important.
941 * During driver activity, SmartSpeed should be enabled so performance is
942 * maintained.
943 **/
944 s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
945 {
946 struct e1000_phy_info *phy = &hw->phy;
947 u32 data;
948
949 DEBUGFUNC("e1000_set_d3_lplu_state_82580");
950
951 data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
952
953 if (!active) {
954 data &= ~E1000_82580_PM_D3_LPLU;
955 /*
956 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
957 * during Dx states where the power conservation is most
958 * important. During driver activity we should enable
959 * SmartSpeed, so performance is maintained.
960 */
961 if (phy->smart_speed == e1000_smart_speed_on)
962 data |= E1000_82580_PM_SPD;
963 else if (phy->smart_speed == e1000_smart_speed_off)
964 data &= ~E1000_82580_PM_SPD;
965 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
966 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
967 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
968 data |= E1000_82580_PM_D3_LPLU;
969 /* When LPLU is enabled, we should disable SmartSpeed */
970 data &= ~E1000_82580_PM_SPD;
971 }
972
973 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
974 return E1000_SUCCESS;
975 }
976
977 /**
978 * e1000_acquire_nvm_82575 - Request for access to EEPROM
979 * @hw: pointer to the HW structure
980 *
981 * Acquire the necessary semaphores for exclusive access to the EEPROM.
982 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
983 * Return successful if access grant bit set, else clear the request for
984 * EEPROM access and return -E1000_ERR_NVM (-1).
985 **/
986 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
987 {
988 s32 ret_val = E1000_SUCCESS;
989
990 DEBUGFUNC("e1000_acquire_nvm_82575");
991
992 ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
993 if (ret_val)
994 goto out;
995
996 /*
997 * Check whether a previous access left error
998 * flags set that could block this access
999 */
1000 if (hw->mac.type == e1000_i350) {
1001 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
1002 if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
1003 E1000_EECD_TIMEOUT)) {
1004 /* Clear all access error flags */
1005 E1000_WRITE_REG(hw, E1000_EECD, eecd |
1006 E1000_EECD_ERROR_CLR);
1007 DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
1008 }
1009 }
1010
1011 if (hw->mac.type == e1000_82580) {
1012 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
1013 if (eecd & E1000_EECD_BLOCKED) {
1014 /* Clear access error flag */
1015 E1000_WRITE_REG(hw, E1000_EECD, eecd |
1016 E1000_EECD_BLOCKED);
1017 DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
1018 }
1019 }
1020
1021 ret_val = e1000_acquire_nvm_generic(hw);
1022 if (ret_val)
1023 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
1024
1025 out:
1026 return ret_val;
1027 }
1028
1029 /**
1030 * e1000_release_nvm_82575 - Release exclusive access to EEPROM
1031 * @hw: pointer to the HW structure
1032 *
1033 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
1034 * then release the semaphores acquired.
1035 **/
1036 static void e1000_release_nvm_82575(struct e1000_hw *hw)
1037 {
1038 DEBUGFUNC("e1000_release_nvm_82575");
1039
1040 e1000_release_nvm_generic(hw);
1041
1042 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
1043 }
1044
1045 /**
1046 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
1047 * @hw: pointer to the HW structure
1048 * @mask: specifies which semaphore to acquire
1049 *
1050 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
1051 * will also specify which port we're acquiring the lock for.
1052 **/
1053 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1054 {
1055 u32 swfw_sync;
1056 u32 swmask = mask;
1057 u32 fwmask = mask << 16;
1058 s32 ret_val = E1000_SUCCESS;
1059 s32 i = 0, timeout = 200;
1060
1061 DEBUGFUNC("e1000_acquire_swfw_sync_82575");
1062
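/* Poll for the resource: up to 200 attempts, 5 ms apart (~1 second total). */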
1063 while (i < timeout) {
1064 if (e1000_get_hw_semaphore_generic(hw)) {
1065 ret_val = -E1000_ERR_SWFW_SYNC;
1066 goto out;
1067 }
1068
1069 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
1070 if (!(swfw_sync & (fwmask | swmask)))
1071 break;
1072
1073 /*
1074 * Firmware currently using resource (fwmask)
1075 * or other software thread using resource (swmask)
1076 */
1077 e1000_put_hw_semaphore_generic(hw);
1078 msec_delay_irq(5);
1079 i++;
1080 }
1081
1082 if (i == timeout) {
1083 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
1084 ret_val = -E1000_ERR_SWFW_SYNC;
1085 goto out;
1086 }
1087
1088 swfw_sync |= swmask;
1089 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
1090
1091 e1000_put_hw_semaphore_generic(hw);
1092
1093 out:
1094 return ret_val;
1095 }
1096
1097 /**
1098 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
1099 * @hw: pointer to the HW structure
1100 * @mask: specifies which semaphore to acquire
1101 *
1102 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
1103 * will also specify which port we're releasing the lock for.
1104 **/
1105 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1106 {
1107 u32 swfw_sync;
1108
1109 DEBUGFUNC("e1000_release_swfw_sync_82575");
1110
1111 while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
1112 ; /* Empty */
1113
1114 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
1115 swfw_sync &= ~mask;
1116 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
1117
1118 e1000_put_hw_semaphore_generic(hw);
1119 }
1120
1121 /**
1122 * e1000_get_cfg_done_82575 - Read config done bit
1123 * @hw: pointer to the HW structure
1124 *
1125 * Read the management control register for the config done bit for
1126 * completion status. NOTE: silicon which is EEPROM-less will fail trying
1127 * to read the config done bit, so an error is *ONLY* logged and returns
1128 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
1129 * would not be able to be reset or change link.
1130 **/
1131 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
1132 {
1133 s32 timeout = PHY_CFG_TIMEOUT;
1134 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1135
1136 DEBUGFUNC("e1000_get_cfg_done_82575");
1137
1138 if (hw->bus.func == E1000_FUNC_1)
1139 mask = E1000_NVM_CFG_DONE_PORT_1;
1140 else if (hw->bus.func == E1000_FUNC_2)
1141 mask = E1000_NVM_CFG_DONE_PORT_2;
1142 else if (hw->bus.func == E1000_FUNC_3)
1143 mask = E1000_NVM_CFG_DONE_PORT_3;
1144 while (timeout) {
1145 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
1146 break;
1147 msec_delay(1);
1148 timeout--;
1149 }
1150 if (!timeout)
1151 DEBUGOUT("MNG configuration cycle has not completed.\n");
1152
1153 /* If EEPROM is not marked present, init the PHY manually */
1154 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
1155 (hw->phy.type == e1000_phy_igp_3))
1156 e1000_phy_init_script_igp3(hw);
1157
1158 return E1000_SUCCESS;
1159 }
1160
1161 /**
1162 * e1000_get_link_up_info_82575 - Get link speed/duplex info
1163 * @hw: pointer to the HW structure
1164 * @speed: stores the current speed
1165 * @duplex: stores the current duplex
1166 *
1167 * This is a wrapper function, if using the serial gigabit media independent
1168 * interface, use PCS to retrieve the link speed and duplex information.
1169 * Otherwise, use the generic function to get the link speed and duplex info.
1170 **/
1171 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1172 u16 *duplex)
1173 {
1174 s32 ret_val;
1175
1176 DEBUGFUNC("e1000_get_link_up_info_82575");
1177
1178 if (hw->phy.media_type != e1000_media_type_copper)
1179 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
1180 duplex);
1181 else
1182 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
1183 duplex);
1184
1185 return ret_val;
1186 }
1187
1188 /**
1189 * e1000_check_for_link_82575 - Check for link
1190 * @hw: pointer to the HW structure
1191 *
1192 * If sgmii is enabled, then use the pcs register to determine link, otherwise
1193 * use the generic interface for determining link.
1194 **/
1195 static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
1196 {
1197 s32 ret_val;
1198 u16 speed, duplex;
1199
1200 DEBUGFUNC("e1000_check_for_link_82575");
1201
1202 if (hw->phy.media_type != e1000_media_type_copper) {
1203 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
1204 &duplex);
1205 /*
1206 * Use this flag to determine if link needs to be checked or
1207 * not. If we have link clear the flag so that we do not
1208 * continue to check for link.
1209 */
1210 hw->mac.get_link_status = !hw->mac.serdes_has_link;
1211
1212 /*
1213 * Configure Flow Control now that Auto-Neg has completed.
1214 * First, we need to restore the desired flow control
1215 * settings because we may have had to re-autoneg with a
1216 * different link partner.
1217 */
1218 ret_val = e1000_config_fc_after_link_up_generic(hw);
1219 if (ret_val)
1220 DEBUGOUT("Error configuring flow control\n");
1221 } else {
1222 ret_val = e1000_check_for_copper_link_generic(hw);
1223 }
1224
1225 return ret_val;
1226 }
1227
1228 /**
1229 * e1000_check_for_link_media_swap - Check which M88E1112 interface linked
1230 * @hw: pointer to the HW structure
1231 *
1232 * Poll the M88E1112 interfaces to see which interface achieved link.
1233 */
1234 static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
1235 {
1236 struct e1000_phy_info *phy = &hw->phy;
1237 s32 ret_val;
1238 u16 data;
1239 u8 port = 0;
1240
1241 DEBUGFUNC("e1000_check_for_link_media_swap");
1242
1243 /* Check for copper. */
1244 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
1245 if (ret_val)
1246 return ret_val;
1247
1248 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
1249 if (ret_val)
1250 return ret_val;
1251
1252 if (data & E1000_M88E1112_STATUS_LINK)
1253 port = E1000_MEDIA_PORT_COPPER;
1254
1255 /* Check for other. */
1256 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
1257 if (ret_val)
1258 return ret_val;
1259
1260 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
1261 if (ret_val)
1262 return ret_val;
1263
1264 if (data & E1000_M88E1112_STATUS_LINK)
1265 port = E1000_MEDIA_PORT_OTHER;
1266
1267 /* Determine if a swap needs to happen. */
1268 if (port && (hw->dev_spec._82575.media_port != port)) {
1269 hw->dev_spec._82575.media_port = port;
1270 hw->dev_spec._82575.media_changed = TRUE;
1271 }
1272
1273 if (port == E1000_MEDIA_PORT_COPPER) {
1274 /* reset page to 0 */
1275 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
1276 if (ret_val)
1277 return ret_val;
1278 e1000_check_for_link_82575(hw);
1279 } else {
1280 e1000_check_for_link_82575(hw);
1281 /* reset page to 0 */
1282 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
1283 if (ret_val)
1284 return ret_val;
1285 }
1286
1287 return E1000_SUCCESS;
1288 }
1289
1290 /**
1291 * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1292 * @hw: pointer to the HW structure
1293 **/
1294 static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
1295 {
1296 u32 reg;
1297
1298 DEBUGFUNC("e1000_power_up_serdes_link_82575");
1299
1300 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1301 !e1000_sgmii_active_82575(hw))
1302 return;
1303
1304 /* Enable PCS to turn on link */
1305 reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1306 reg |= E1000_PCS_CFG_PCS_EN;
1307 E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1308
1309 /* Power up the laser */
1310 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1311 reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1312 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1313
1314 /* flush the write to verify completion */
1315 E1000_WRITE_FLUSH(hw);
1316 msec_delay(1);
1317 }
1318
1319 /**
1320 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1321 * @hw: pointer to the HW structure
1322 * @speed: stores the current speed
1323 * @duplex: stores the current duplex
1324 *
1325 * Using the physical coding sub-layer (PCS), retrieve the current speed and
1326 * duplex, then store the values in the pointers provided.
1327 **/
1328 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
1329 u16 *speed, u16 *duplex)
1330 {
1331 struct e1000_mac_info *mac = &hw->mac;
1332 u32 pcs;
1333 u32 status;
1334
1335 DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
1336
1337 /*
1338 * Read the PCS Status register for link state. For non-copper mode,
1339 * the status register is not accurate. The PCS status register is
1340 * used instead.
1341 */
1342 pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
1343
1344 /*
1345 * The link up bit determines when link is up on autoneg.
1346 */
1347 if (pcs & E1000_PCS_LSTS_LINK_OK) {
1348 mac->serdes_has_link = TRUE;
1349
1350 /* Detect and store PCS speed */
1351 if (pcs & E1000_PCS_LSTS_SPEED_1000)
1352 *speed = SPEED_1000;
1353 else if (pcs & E1000_PCS_LSTS_SPEED_100)
1354 *speed = SPEED_100;
1355 else
1356 *speed = SPEED_10;
1357
1358 /* Detect and store PCS duplex */
1359 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
1360 *duplex = FULL_DUPLEX;
1361 else
1362 *duplex = HALF_DUPLEX;
1363
1364 /* Check if it is an I354 2.5Gb backplane connection. */
1365 if (mac->type == e1000_i354) {
1366 status = E1000_READ_REG(hw, E1000_STATUS);
1367 if ((status & E1000_STATUS_2P5_SKU) &&
1368 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1369 *speed = SPEED_2500;
1370 *duplex = FULL_DUPLEX;
1371 DEBUGOUT("2500 Mbs, ");
1372 DEBUGOUT("Full Duplex\n");
1373 }
1374 }
1375
1376 } else {
1377 mac->serdes_has_link = FALSE;
1378 *speed = 0;
1379 *duplex = 0;
1380 }
1381
1382 return E1000_SUCCESS;
1383 }
1384
1385 /**
1386 * e1000_shutdown_serdes_link_82575 - Remove link during power down
1387 * @hw: pointer to the HW structure
1388 *
1389 * In the case of serdes, shut down the SFP module and PCS on driver unload
1390 * when management pass-through is not enabled.
1391 **/
1392 void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
1393 {
1394 u32 reg;
1395
1396 DEBUGFUNC("e1000_shutdown_serdes_link_82575");
1397
1398 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1399 !e1000_sgmii_active_82575(hw))
1400 return;
1401
1402 if (!e1000_enable_mng_pass_thru(hw)) {
1403 /* Disable PCS to turn off link */
1404 reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1405 reg &= ~E1000_PCS_CFG_PCS_EN;
1406 E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1407
1408 /* shutdown the laser */
1409 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1410 reg |= E1000_CTRL_EXT_SDP3_DATA;
1411 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1412
1413 /* flush the write to verify completion */
1414 E1000_WRITE_FLUSH(hw);
1415 msec_delay(1);
1416 }
1417
1418 return;
1419 }
1420
1421 /**
1422 * e1000_reset_hw_82575 - Reset hardware
1423 * @hw: pointer to the HW structure
1424 *
1425 * This resets the hardware into a known state.
1426 **/
1427 static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
1428 {
1429 u32 ctrl;
1430 s32 ret_val;
1431
1432 DEBUGFUNC("e1000_reset_hw_82575");
1433
1434 /*
1435 * Prevent the PCI-E bus from sticking if there is no TLP connection
1436 * on the last TLP read/write transaction when MAC is reset.
1437 */
1438 ret_val = e1000_disable_pcie_master_generic(hw);
1439 if (ret_val)
1440 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1441
1442 /* set the completion timeout for interface */
1443 ret_val = e1000_set_pcie_completion_timeout(hw);
1444 if (ret_val)
1445 DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1446
1447 DEBUGOUT("Masking off all interrupts\n");
1448 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1449
1450 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1451 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1452 E1000_WRITE_FLUSH(hw);
1453
1454 msec_delay(10);
1455
1456 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1457
1458 DEBUGOUT("Issuing a global reset to MAC\n");
1459 E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
1460
1461 ret_val = e1000_get_auto_rd_done_generic(hw);
1462 if (ret_val) {
1463 /*
1464 * When auto config read does not complete, do not
1465 * return with an error. This can happen in situations
1466 * where there is no eeprom and prevents getting link.
1467 */
1468 DEBUGOUT("Auto Read Done did not complete\n");
1469 }
1470
1471 /* If EEPROM is not present, run manual init scripts */
1472 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
1473 e1000_reset_init_script_82575(hw);
1474
1475 /* Clear any pending interrupt events. */
1476 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1477 E1000_READ_REG(hw, E1000_ICR);
1478
1479 /* Install any alternate MAC address into RAR0 */
1480 ret_val = e1000_check_alt_mac_addr_generic(hw);
1481
1482 return ret_val;
1483 }
1484
1485 /**
1486 * e1000_init_hw_82575 - Initialize hardware
1487 * @hw: pointer to the HW structure
1488 *
1489 * This inits the hardware readying it for operation.
1490 **/
1491 s32 e1000_init_hw_82575(struct e1000_hw *hw)
1492 {
1493 struct e1000_mac_info *mac = &hw->mac;
1494 s32 ret_val;
1495 u16 i, rar_count = mac->rar_entry_count;
1496
1497 DEBUGFUNC("e1000_init_hw_82575");
1498
1499 /* Initialize identification LED */
1500 ret_val = mac->ops.id_led_init(hw);
1501 if (ret_val) {
1502 DEBUGOUT("Error initializing identification LED\n");
1503 /* This is not fatal and we should not stop init due to this */
1504 }
1505
1506 /* Disabling VLAN filtering */
1507 DEBUGOUT("Initializing the IEEE VLAN\n");
1508 mac->ops.clear_vfta(hw);
1509
1510 /* Setup the receive address */
1511 e1000_init_rx_addrs_generic(hw, rar_count);
1512
1513 /* Zero out the Multicast HASH table */
1514 DEBUGOUT("Zeroing the MTA\n");
1515 for (i = 0; i < mac->mta_reg_count; i++)
1516 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1517
1518 /* Zero out the Unicast HASH table */
1519 DEBUGOUT("Zeroing the UTA\n");
1520 for (i = 0; i < mac->uta_reg_count; i++)
1521 E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
1522
1523 /* Setup link and flow control */
1524 ret_val = mac->ops.setup_link(hw);
1525
1526 /* Set the default MTU size */
1527 hw->dev_spec._82575.mtu = 1500;
1528
1529 /*
1530 * Clear all of the statistics registers (clear on read). It is
1531 * important that we do this after we have tried to establish link
1532 * because the symbol error count will increment wildly if there
1533 * is no link.
1534 */
1535 e1000_clear_hw_cntrs_82575(hw);
1536
1537 return ret_val;
1538 }
1539
1540 /**
1541 * e1000_setup_copper_link_82575 - Configure copper link settings
1542 * @hw: pointer to the HW structure
1543 *
1544 * Configures the link for auto-neg or forced speed and duplex. Then we check
1545 * for link, once link is established calls to configure collision distance
1546 * and flow control are called.
1547 **/
1548 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1549 {
1550 u32 ctrl;
1551 s32 ret_val;
1552 u32 phpm_reg;
1553
1554 DEBUGFUNC("e1000_setup_copper_link_82575");
1555
1556 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1557 ctrl |= E1000_CTRL_SLU;
1558 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1559 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1560
1561 /* Clear Go Link Disconnect bit on supported devices */
1562 switch (hw->mac.type) {
1563 case e1000_82580:
1564 case e1000_i350:
1565 case e1000_i210:
1566 case e1000_i211:
1567 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1568 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1569 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1570 break;
1571 default:
1572 break;
1573 }
1574
1575 ret_val = e1000_setup_serdes_link_82575(hw);
1576 if (ret_val)
1577 goto out;
1578
1579 if (e1000_sgmii_active_82575(hw)) {
1580 /* allow time for the SFP cage to power up the PHY */
1581 msec_delay(300);
1582
1583 ret_val = hw->phy.ops.reset(hw);
1584 if (ret_val) {
1585 DEBUGOUT("Error resetting the PHY.\n");
1586 goto out;
1587 }
1588 }
1589 switch (hw->phy.type) {
1590 case e1000_phy_i210:
1591 case e1000_phy_m88:
1592 switch (hw->phy.id) {
1593 case I347AT4_E_PHY_ID:
1594 case M88E1112_E_PHY_ID:
1595 case M88E1340M_E_PHY_ID:
1596 case M88E1543_E_PHY_ID:
1597 case M88E1512_E_PHY_ID:
1598 case I210_I_PHY_ID:
1599 ret_val = e1000_copper_link_setup_m88_gen2(hw);
1600 break;
1601 default:
1602 ret_val = e1000_copper_link_setup_m88(hw);
1603 break;
1604 }
1605 break;
1606 case e1000_phy_igp_3:
1607 ret_val = e1000_copper_link_setup_igp(hw);
1608 break;
1609 case e1000_phy_82580:
1610 ret_val = e1000_copper_link_setup_82577(hw);
1611 break;
1612 default:
1613 ret_val = -E1000_ERR_PHY;
1614 break;
1615 }
1616
1617 if (ret_val)
1618 goto out;
1619
1620 ret_val = e1000_setup_copper_link_generic(hw);
1621 out:
1622 return ret_val;
1623 }
1624
1625 /**
1626 * e1000_setup_serdes_link_82575 - Setup link for serdes
1627 * @hw: pointer to the HW structure
1628 *
1629 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1630 * used on copper connections where the serial gigabit media independent
1631 * interface (SGMII) or serdes fiber is being used. Configures the link
1632 * for auto-negotiation or forces speed/duplex.
1633 **/
1634 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
1635 {
1636 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1637 bool pcs_autoneg;
1638 s32 ret_val = E1000_SUCCESS;
1639 u16 data;
1640
1641 DEBUGFUNC("e1000_setup_serdes_link_82575");
1642
1643 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1644 !e1000_sgmii_active_82575(hw))
1645 return ret_val;
1646
1647 /*
1648 * On the 82575, SerDes loopback mode persists until it is
1649 * explicitly turned off or a power cycle is performed. A read to
1650 * the register does not indicate its status. Therefore, we ensure
1651 * loopback mode is disabled during initialization.
1652 */
1653 E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1654
1655 /* power on the sfp cage if present */
1656 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1657 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1658 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1659
1660 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1661 ctrl_reg |= E1000_CTRL_SLU;
1662
1663 /* set both sw defined pins on 82575/82576*/
1664 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
1665 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1666
1667 reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1668
1669 /* default pcs_autoneg to the same setting as mac autoneg */
1670 pcs_autoneg = hw->mac.autoneg;
1671
1672 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1673 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1674 /* sgmii mode lets the phy handle forcing speed/duplex */
1675 pcs_autoneg = TRUE;
1676 /* autoneg time out should be disabled for SGMII mode */
1677 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1678 break;
1679 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1680 /* disable PCS autoneg and support parallel detect only */
1681 pcs_autoneg = FALSE;
1682 /* fall through to default case */
1683 default:
1684 if (hw->mac.type == e1000_82575 ||
1685 hw->mac.type == e1000_82576) {
1686 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1687 if (ret_val) {
1688 DEBUGOUT("NVM Read Error\n");
1689 return ret_val;
1690 }
1691
1692 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1693 pcs_autoneg = FALSE;
1694 }
1695
1696 /*
1697 * non-SGMII modes only support a speed of 1000/Full for the
1698 * link so it is best to just force the MAC and let the pcs
1699 * link either autoneg or be forced to 1000/Full
1700 */
1701 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1702 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1703
1704 /* set speed of 1000/Full if speed/duplex is forced */
1705 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1706 break;
1707 }
1708
1709 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1710
1711 /*
1712 * New SerDes mode allows for forcing speed or autonegotiating speed
1713 * at 1gb. Autoneg should be default set by most drivers. This is the
1714 * mode that will be compatible with older link partners and switches.
1715 * However, both are supported by the hardware and some drivers/tools.
1716 */
1717 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1718 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1719
1720 if (pcs_autoneg) {
1721 /* Set PCS register for autoneg */
1722 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1723 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1724
1725 /* Disable force flow control for autoneg */
1726 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1727
1728 /* Configure flow control advertisement for autoneg */
1729 anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
1730 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1731
1732 switch (hw->fc.requested_mode) {
1733 case e1000_fc_full:
1734 case e1000_fc_rx_pause:
1735 anadv_reg |= E1000_TXCW_ASM_DIR;
1736 anadv_reg |= E1000_TXCW_PAUSE;
1737 break;
1738 case e1000_fc_tx_pause:
1739 anadv_reg |= E1000_TXCW_ASM_DIR;
1740 break;
1741 default:
1742 break;
1743 }
1744
1745 E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
1746
1747 DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1748 } else {
1749 /* Set PCS register for forced link */
1750 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1751
1752 /* Force flow control for forced link */
1753 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1754
1755 DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1756 }
1757
1758 E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1759
1760 if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
1761 e1000_force_mac_fc_generic(hw);
1762
1763 return ret_val;
1764 }
1765
1766 /**
1767 * e1000_get_media_type_82575 - derives current media type.
1768 * @hw: pointer to the HW structure
1769 *
1770 * The media type is chosen based on several settings.
1771 * The following are taken into account:
1772 * - link mode set in the current port Init Control Word #3
1773 * - current link mode settings in CSR register
1774 * - MDIO vs. I2C PHY control interface chosen
1775 * - SFP module media type
1776 **/
1777 static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
1778 {
1779 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1780 s32 ret_val = E1000_SUCCESS;
1781 u32 ctrl_ext = 0;
1782 u32 link_mode = 0;
1783
1784 /* Set internal phy as default */
1785 dev_spec->sgmii_active = FALSE;
1786 dev_spec->module_plugged = FALSE;
1787
1788 /* Get CSR setting */
1789 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1790
1791 /* extract link mode setting */
1792 link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
1793
1794 switch (link_mode) {
1795 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1796 hw->phy.media_type = e1000_media_type_internal_serdes;
1797 break;
1798 case E1000_CTRL_EXT_LINK_MODE_GMII:
1799 hw->phy.media_type = e1000_media_type_copper;
1800 break;
1801 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1802 /* Get phy control interface type set (MDIO vs. I2C)*/
1803 if (e1000_sgmii_uses_mdio_82575(hw)) {
1804 hw->phy.media_type = e1000_media_type_copper;
1805 dev_spec->sgmii_active = TRUE;
1806 break;
1807 }
1808 /* fall through for I2C based SGMII */
1809 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
1810 /* read media type from SFP EEPROM */
1811 ret_val = e1000_set_sfp_media_type_82575(hw);
1812 if ((ret_val != E1000_SUCCESS) ||
1813 (hw->phy.media_type == e1000_media_type_unknown)) {
1814 /*
1815 * If media type was not identified then return media
1816 * type defined by the CTRL_EXT settings.
1817 */
1818 hw->phy.media_type = e1000_media_type_internal_serdes;
1819
1820 if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
1821 hw->phy.media_type = e1000_media_type_copper;
1822 dev_spec->sgmii_active = TRUE;
1823 }
1824
1825 break;
1826 }
1827
1828 /* do not change link mode for 100BaseFX */
1829 if (dev_spec->eth_flags.e100_base_fx)
1830 break;
1831
1832 /* change current link mode setting */
1833 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
1834
1835 if (hw->phy.media_type == e1000_media_type_copper)
1836 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
1837 else
1838 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
1839
1840 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1841
1842 break;
1843 }
1844
1845 return ret_val;
1846 }
1847
1848 /**
1849 * e1000_set_sfp_media_type_82575 - derives SFP module media type.
1850 * @hw: pointer to the HW structure
1851 *
1852 * The media type is chosen based on the SFP module
1853 * compatibility flags retrieved from the SFP ID EEPROM.
1854 **/
1855 static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
1856 {
1857 s32 ret_val = E1000_ERR_CONFIG;
1858 u32 ctrl_ext = 0;
1859 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1860 struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
1861 u8 tranceiver_type = 0;
1862 s32 timeout = 3;
1863
1864 /* Turn I2C interface ON and power on sfp cage */
1865 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1866 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1867 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
1868
1869 E1000_WRITE_FLUSH(hw);
1870
1871 /* Read SFP module data */
1872 while (timeout) {
1873 ret_val = e1000_read_sfp_data_byte(hw,
1874 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
1875 &tranceiver_type);
1876 if (ret_val == E1000_SUCCESS)
1877 break;
1878 msec_delay(100);
1879 timeout--;
1880 }
1881 if (ret_val != E1000_SUCCESS)
1882 goto out;
1883
1884 ret_val = e1000_read_sfp_data_byte(hw,
1885 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
1886 (u8 *)eth_flags);
1887 if (ret_val != E1000_SUCCESS)
1888 goto out;
1889
1890 /* Check if there is some SFP module plugged and powered */
1891 if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
1892 (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
1893 dev_spec->module_plugged = TRUE;
1894 if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
1895 hw->phy.media_type = e1000_media_type_internal_serdes;
1896 } else if (eth_flags->e100_base_fx) {
1897 dev_spec->sgmii_active = TRUE;
1898 hw->phy.media_type = e1000_media_type_internal_serdes;
1899 } else if (eth_flags->e1000_base_t) {
1900 dev_spec->sgmii_active = TRUE;
1901 hw->phy.media_type = e1000_media_type_copper;
1902 } else {
1903 hw->phy.media_type = e1000_media_type_unknown;
1904 DEBUGOUT("PHY module has not been recognized\n");
1905 goto out;
1906 }
1907 } else {
1908 hw->phy.media_type = e1000_media_type_unknown;
1909 }
1910 ret_val = E1000_SUCCESS;
1911 out:
1912 /* Restore I2C interface setting */
1913 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1914 return ret_val;
1915 }
1916
1917 /**
1918 * e1000_valid_led_default_82575 - Verify a valid default LED config
1919 * @hw: pointer to the HW structure
1920 * @data: pointer to the NVM (EEPROM)
1921 *
1922 * Read the EEPROM for the current default LED configuration. If the
1923 * LED configuration is not valid, set to a valid LED configuration.
1924 **/
1925 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1926 {
1927 s32 ret_val;
1928
1929 DEBUGFUNC("e1000_valid_led_default_82575");
1930
1931 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1932 if (ret_val) {
1933 DEBUGOUT("NVM Read Error\n");
1934 goto out;
1935 }
1936
1937 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1938 switch (hw->phy.media_type) {
1939 case e1000_media_type_internal_serdes:
1940 *data = ID_LED_DEFAULT_82575_SERDES;
1941 break;
1942 case e1000_media_type_copper:
1943 default:
1944 *data = ID_LED_DEFAULT;
1945 break;
1946 }
1947 }
1948 out:
1949 return ret_val;
1950 }
1951
1952 /**
1953 * e1000_sgmii_active_82575 - Return sgmii state
1954 * @hw: pointer to the HW structure
1955 *
1956 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1957 * which can be enabled for use in embedded applications. Simply
1958 * return the current state of the sgmii interface.
1959 **/
1960 static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
1961 {
1962 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1963 return dev_spec->sgmii_active;
1964 }
1965
1966 /**
1967 * e1000_reset_init_script_82575 - Inits HW defaults after reset
1968 * @hw: pointer to the HW structure
1969 *
1970 * Inits recommended HW defaults after a reset when there is no EEPROM
1971 * detected. This is only for the 82575.
1972 **/
1973 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
1974 {
1975 DEBUGFUNC("e1000_reset_init_script_82575");
1976
1977 if (hw->mac.type == e1000_82575) {
1978 DEBUGOUT("Running reset init script for 82575\n");
1979 /* SerDes configuration via SERDESCTRL */
1980 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
1981 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
1982 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
1983 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
1984
1985 /* CCM configuration via CCMCTL register */
1986 e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
1987 e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
1988
1989 /* PCIe lanes configuration */
1990 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
1991 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
1992 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
1993 e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
1994
1995 /* PCIe PLL Configuration */
1996 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
1997 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
1998 e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
1999 }
2000
2001 return E1000_SUCCESS;
2002 }
2003
2004 /**
2005 * e1000_read_mac_addr_82575 - Read device MAC address
2006 * @hw: pointer to the HW structure
2007 **/
2008 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
2009 {
2010 s32 ret_val;
2011
2012 DEBUGFUNC("e1000_read_mac_addr_82575");
2013
2014 /*
2015 * If there's an alternate MAC address place it in RAR0
2016 * so that it will override the Si installed default perm
2017 * address.
2018 */
2019 ret_val = e1000_check_alt_mac_addr_generic(hw);
2020 if (ret_val)
2021 goto out;
2022
2023 ret_val = e1000_read_mac_addr_generic(hw);
2024
2025 out:
2026 return ret_val;
2027 }
2028
2029 /**
2030 * e1000_config_collision_dist_82575 - Configure collision distance
2031 * @hw: pointer to the HW structure
2032 *
2033 * Configures the collision distance to the default value and is used
2034 * during link setup.
2035 **/
2036 static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
2037 {
2038 u32 tctl_ext;
2039
2040 DEBUGFUNC("e1000_config_collision_dist_82575");
2041
2042 tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
2043
2044 tctl_ext &= ~E1000_TCTL_EXT_COLD;
2045 tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
2046
2047 E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
2048 E1000_WRITE_FLUSH(hw);
2049 }
2050
2051 /**
2052 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
2053 * @hw: pointer to the HW structure
2054 *
2055 * In the case of a PHY power down to save power, to turn off link during a
2056 * driver unload, or when wake on LAN is not enabled, remove the link.
2057 **/
2058 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
2059 {
2060 struct e1000_phy_info *phy = &hw->phy;
2061
2062 if (!(phy->ops.check_reset_block))
2063 return;
2064
2065 /* If the management interface is not enabled, then power down */
2066 if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
2067 e1000_power_down_phy_copper(hw);
2068
2069 return;
2070 }
2071
2072 /**
2073 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
2074 * @hw: pointer to the HW structure
2075 *
2076 * Clears the hardware counters by reading the counter registers.
2077 **/
2078 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
2079 {
2080 DEBUGFUNC("e1000_clear_hw_cntrs_82575");
2081
2082 e1000_clear_hw_cntrs_base_generic(hw);
2083
2084 E1000_READ_REG(hw, E1000_PRC64);
2085 E1000_READ_REG(hw, E1000_PRC127);
2086 E1000_READ_REG(hw, E1000_PRC255);
2087 E1000_READ_REG(hw, E1000_PRC511);
2088 E1000_READ_REG(hw, E1000_PRC1023);
2089 E1000_READ_REG(hw, E1000_PRC1522);
2090 E1000_READ_REG(hw, E1000_PTC64);
2091 E1000_READ_REG(hw, E1000_PTC127);
2092 E1000_READ_REG(hw, E1000_PTC255);
2093 E1000_READ_REG(hw, E1000_PTC511);
2094 E1000_READ_REG(hw, E1000_PTC1023);
2095 E1000_READ_REG(hw, E1000_PTC1522);
2096
2097 E1000_READ_REG(hw, E1000_ALGNERRC);
2098 E1000_READ_REG(hw, E1000_RXERRC);
2099 E1000_READ_REG(hw, E1000_TNCRS);
2100 E1000_READ_REG(hw, E1000_CEXTERR);
2101 E1000_READ_REG(hw, E1000_TSCTC);
2102 E1000_READ_REG(hw, E1000_TSCTFC);
2103
2104 E1000_READ_REG(hw, E1000_MGTPRC);
2105 E1000_READ_REG(hw, E1000_MGTPDC);
2106 E1000_READ_REG(hw, E1000_MGTPTC);
2107
2108 E1000_READ_REG(hw, E1000_IAC);
2109 E1000_READ_REG(hw, E1000_ICRXOC);
2110
2111 E1000_READ_REG(hw, E1000_ICRXPTC);
2112 E1000_READ_REG(hw, E1000_ICRXATC);
2113 E1000_READ_REG(hw, E1000_ICTXPTC);
2114 E1000_READ_REG(hw, E1000_ICTXATC);
2115 E1000_READ_REG(hw, E1000_ICTXQEC);
2116 E1000_READ_REG(hw, E1000_ICTXQMTC);
2117 E1000_READ_REG(hw, E1000_ICRXDMTC);
2118
2119 E1000_READ_REG(hw, E1000_CBTMPC);
2120 E1000_READ_REG(hw, E1000_HTDPMC);
2121 E1000_READ_REG(hw, E1000_CBRMPC);
2122 E1000_READ_REG(hw, E1000_RPTHC);
2123 E1000_READ_REG(hw, E1000_HGPTC);
2124 E1000_READ_REG(hw, E1000_HTCBDPC);
2125 E1000_READ_REG(hw, E1000_HGORCL);
2126 E1000_READ_REG(hw, E1000_HGORCH);
2127 E1000_READ_REG(hw, E1000_HGOTCL);
2128 E1000_READ_REG(hw, E1000_HGOTCH);
2129 E1000_READ_REG(hw, E1000_LENERRS);
2130
2131 /* This register should not be read in copper configurations */
2132 if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
2133 e1000_sgmii_active_82575(hw))
2134 E1000_READ_REG(hw, E1000_SCVPC);
2135 }
2136
2137 /**
2138 * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
2139 * @hw: pointer to the HW structure
2140 *
2141 * After Rx enable, if manageability is enabled then there is likely some
2142 * bad data at the start of the fifo and possibly in the DMA fifo. This
2143 * function clears the fifos and flushes any packets that came in as rx was
2144 * being enabled.
2145 **/
2146 void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
2147 {
2148 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
2149 int i, ms_wait;
2150
2151 DEBUGFUNC("e1000_rx_fifo_flush_82575");
2152
2153 /* disable IPv6 options as per hardware errata */
2154 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2155 rfctl |= E1000_RFCTL_IPV6_EX_DIS;
2156 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2157
2158 if (hw->mac.type != e1000_82575 ||
2159 !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
2160 return;
2161
2162 /* Disable all Rx queues */
2163 for (i = 0; i < 4; i++) {
2164 rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
2165 E1000_WRITE_REG(hw, E1000_RXDCTL(i),
2166 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
2167 }
2168 /* Poll all queues to verify they have shut down */
2169 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
2170 msec_delay(1);
2171 rx_enabled = 0;
2172 for (i = 0; i < 4; i++)
2173 rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
2174 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
2175 break;
2176 }
2177
2178 if (ms_wait == 10)
2179 DEBUGOUT("Queue disable timed out after 10ms\n");
2180
2181 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
2182 * incoming packets are rejected. Set enable and wait 2ms so that
2183 * any packet that was in flight while RCTL.EN was being set is flushed
2184 */
2185 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
2186
2187 rlpml = E1000_READ_REG(hw, E1000_RLPML);
2188 E1000_WRITE_REG(hw, E1000_RLPML, 0);
2189
2190 rctl = E1000_READ_REG(hw, E1000_RCTL);
2191 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
2192 temp_rctl |= E1000_RCTL_LPE;
2193
2194 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
2195 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
2196 E1000_WRITE_FLUSH(hw);
2197 msec_delay(2);
2198
2199 /* Enable Rx queues that were previously enabled and restore our
2200 * previous state
2201 */
2202 for (i = 0; i < 4; i++)
2203 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
2204 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2205 E1000_WRITE_FLUSH(hw);
2206
2207 E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
2208 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2209
2210 /* Flush receive errors generated by workaround */
2211 E1000_READ_REG(hw, E1000_ROC);
2212 E1000_READ_REG(hw, E1000_RNBC);
2213 E1000_READ_REG(hw, E1000_MPC);
2214 }
2215
2216 /**
2217 * e1000_set_pcie_completion_timeout - set pci-e completion timeout
2218 * @hw: pointer to the HW structure
2219 *
2220 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
2221 * however the hardware default for these parts is 500us to 1ms which is less
2222 * than the 10ms recommended by the pci-e spec. To address this we need to
2223 * increase the value to either 10ms to 200ms for capability version 1 config,
2224 * or 16ms to 55ms for version 2.
2225 **/
2226 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
2227 {
2228 u32 gcr = E1000_READ_REG(hw, E1000_GCR);
2229 s32 ret_val = E1000_SUCCESS;
2230 u16 pcie_devctl2;
2231
2232 /* only take action if timeout value is defaulted to 0 */
2233 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
2234 goto out;
2235
2236 /*
2237 * if capabilities version is type 1 we can write the
2238 * timeout of 10ms to 200ms through the GCR register
2239 */
2240 if (!(gcr & E1000_GCR_CAP_VER2)) {
2241 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
2242 goto out;
2243 }
2244
2245 /*
2246 * for version 2 capabilities we need to write the config space
2247 * directly in order to set the completion timeout value for
2248 * 16ms to 55ms
2249 */
2250 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2251 &pcie_devctl2);
2252 if (ret_val)
2253 goto out;
2254
2255 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
2256
2257 ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2258 &pcie_devctl2);
2259 out:
2260 /* disable completion timeout resend */
2261 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
2262
2263 E1000_WRITE_REG(hw, E1000_GCR, gcr);
2264 return ret_val;
2265 }
2266
2267 /**
2268 * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
2269 * @hw: pointer to the hardware struct
2270 * @enable: state to enter, either enabled or disabled
2271 * @pf: Physical Function pool - do not set anti-spoofing for the PF
2272 *
2273 * enables/disables L2 switch anti-spoofing functionality.
2274 **/
2275 void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
2276 {
2277 u32 reg_val, reg_offset;
2278
2279 switch (hw->mac.type) {
2280 case e1000_82576:
2281 reg_offset = E1000_DTXSWC;
2282 break;
2283 case e1000_i350:
2284 case e1000_i354:
2285 reg_offset = E1000_TXSWC;
2286 break;
2287 default:
2288 return;
2289 }
2290
2291 reg_val = E1000_READ_REG(hw, reg_offset);
2292 if (enable) {
2293 reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
2294 E1000_DTXSWC_VLAN_SPOOF_MASK);
2295 /* The PF can spoof - it has to in order to
2296 * support emulation mode NICs
2297 */
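		/* The XOR below clears the two bits just set for the PF pool:
		 * its MAC spoof-check bit and, MAX_NUM_VFS positions higher,
		 * its VLAN spoof-check bit, exempting the PF from the checks.
		 */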
2298 reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
2299 } else {
2300 reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
2301 E1000_DTXSWC_VLAN_SPOOF_MASK);
2302 }
2303 E1000_WRITE_REG(hw, reg_offset, reg_val);
2304 }
2305
2306 /**
2307 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
2308 * @hw: pointer to the hardware struct
2309 * @enable: state to enter, either enabled or disabled
2310 *
2311 * enables/disables L2 switch loopback functionality.
2312 **/
2313 void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
2314 {
2315 u32 dtxswc;
2316
2317 switch (hw->mac.type) {
2318 case e1000_82576:
2319 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
2320 if (enable)
2321 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2322 else
2323 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2324 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
2325 break;
2326 case e1000_i350:
2327 case e1000_i354:
2328 dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
2329 if (enable)
2330 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2331 else
2332 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2333 E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
2334 break;
2335 default:
2336 /* Currently no other hardware supports loopback */
2337 break;
2338 }
2339
2340
2341 }
2342
2343 /**
2344 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
2345 * @hw: pointer to the hardware struct
2346 * @enable: state to enter, either enabled or disabled
2347 *
2348 * enables/disables replication of packets across multiple pools.
2349 **/
2350 void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
2351 {
2352 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2353
2354 if (enable)
2355 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
2356 else
2357 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
2358
2359 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2360 }
2361
2362 /**
2363 * e1000_read_phy_reg_82580 - Read 82580 MDI control register
2364 * @hw: pointer to the HW structure
2365 * @offset: register offset to be read
2366 * @data: pointer to the read data
2367 *
2368 * Reads the MDI control register in the PHY at offset and stores the
2369 * information read to data.
2370 **/
2371 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
2372 {
2373 s32 ret_val;
2374
2375 DEBUGFUNC("e1000_read_phy_reg_82580");
2376
2377 ret_val = hw->phy.ops.acquire(hw);
2378 if (ret_val)
2379 goto out;
2380
2381 ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
2382
2383 hw->phy.ops.release(hw);
2384
2385 out:
2386 return ret_val;
2387 }
2388
2389 /**
2390 * e1000_write_phy_reg_82580 - Write 82580 MDI control register
2391 * @hw: pointer to the HW structure
2392 * @offset: register offset to write to
2393 * @data: data to write to register at offset
2394 *
2395 * Writes data to MDI control register in the PHY at offset.
2396 **/
2397 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
2398 {
2399 s32 ret_val;
2400
2401 DEBUGFUNC("e1000_write_phy_reg_82580");
2402
2403 ret_val = hw->phy.ops.acquire(hw);
2404 if (ret_val)
2405 goto out;
2406
2407 ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
2408
2409 hw->phy.ops.release(hw);
2410
2411 out:
2412 return ret_val;
2413 }
2414
2415 /**
2416 * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
2417 * @hw: pointer to the HW structure
2418 *
2419 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
2420 * the values found in the EEPROM. This addresses an issue in which these
2421 * bits are not restored from EEPROM after reset.
2422 **/
2423 static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
2424 {
2425 s32 ret_val = E1000_SUCCESS;
2426 u32 mdicnfg;
2427 u16 nvm_data = 0;
2428
2429 DEBUGFUNC("e1000_reset_mdicnfg_82580");
2430
2431 if (hw->mac.type != e1000_82580)
2432 goto out;
2433 if (!e1000_sgmii_active_82575(hw))
2434 goto out;
2435
2436 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2437 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2438 &nvm_data);
2439 if (ret_val) {
2440 DEBUGOUT("NVM Read Error\n");
2441 goto out;
2442 }
2443
2444 mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
2445 if (nvm_data & NVM_WORD24_EXT_MDIO)
2446 mdicnfg |= E1000_MDICNFG_EXT_MDIO;
2447 if (nvm_data & NVM_WORD24_COM_MDIO)
2448 mdicnfg |= E1000_MDICNFG_COM_MDIO;
2449 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
2450 out:
2451 return ret_val;
2452 }
2453
2454 /**
2455 * e1000_reset_hw_82580 - Reset hardware
2456 * @hw: pointer to the HW structure
2457 *
2458 * This resets function or entire device (all ports, etc.)
2459 * to a known state.
2460 **/
2461 static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
2462 {
2463 s32 ret_val = E1000_SUCCESS;
2464 /* BH SW mailbox bit in SW_FW_SYNC */
2465 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
2466 u32 ctrl;
2467 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
2468
2469 DEBUGFUNC("e1000_reset_hw_82580");
2470
2471 hw->dev_spec._82575.global_device_reset = FALSE;
2472
2473 /* 82580 does not reliably do global_device_reset due to hw errata */
2474 if (hw->mac.type == e1000_82580)
2475 global_device_reset = FALSE;
2476
2477 /* Get current control state. */
2478 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2479
2480 /*
2481 * Prevent the PCI-E bus from sticking if there is no TLP connection
2482 * on the last TLP read/write transaction when MAC is reset.
2483 */
2484 ret_val = e1000_disable_pcie_master_generic(hw);
2485 if (ret_val)
2486 DEBUGOUT("PCI-E Master disable polling has failed.\n");
2487
2488 DEBUGOUT("Masking off all interrupts\n");
2489 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2490 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2491 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
2492 E1000_WRITE_FLUSH(hw);
2493
2494 msec_delay(10);
2495
2496 /* Determine whether or not a global dev reset is requested */
2497 if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
2498 swmbsw_mask))
2499 global_device_reset = FALSE;
2500
2501 if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
2502 E1000_STAT_DEV_RST_SET))
2503 ctrl |= E1000_CTRL_DEV_RST;
2504 else
2505 ctrl |= E1000_CTRL_RST;
2506
2507 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
2508
2509 switch (hw->device_id) {
2510 case E1000_DEV_ID_DH89XXCC_SGMII:
2511 break;
2512 default:
2513 E1000_WRITE_FLUSH(hw);
2514 break;
2515 }
2516
2517 /* Add delay to ensure DEV_RST or RST has time to complete */
2518 msec_delay(5);
2519
2520 ret_val = e1000_get_auto_rd_done_generic(hw);
2521 if (ret_val) {
2522 /*
2523 * When auto config read does not complete, do not
2524 * return with an error. This can happen in situations
2525 * where there is no eeprom and prevents getting link.
2526 */
2527 DEBUGOUT("Auto Read Done did not complete\n");
2528 }
2529
2530 /* clear global device reset status bit */
2531 E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
2532
2533 /* Clear any pending interrupt events. */
2534 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2535 E1000_READ_REG(hw, E1000_ICR);
2536
2537 ret_val = e1000_reset_mdicnfg_82580(hw);
2538 if (ret_val)
2539 DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
2540
2541 /* Install any alternate MAC address into RAR0 */
2542 ret_val = e1000_check_alt_mac_addr_generic(hw);
2543
2544 /* Release semaphore */
2545 if (global_device_reset)
2546 hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
2547
2548 return ret_val;
2549 }
2550
2551 /**
2552 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
2553 * @data: data received by reading RXPBS register
2554 *
2555 * The 82580 uses a table based approach for packet buffer allocation sizes.
2556 * This function converts the retrieved value into the correct table value
2557 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2558 * 0x0 36 72 144 1 2 4 8 16
2559 * 0x8 35 70 140 rsv rsv rsv rsv rsv
2560 */
2561 u16 e1000_rxpbs_adjust_82580(u32 data)
2562 {
2563 u16 ret_val = 0;
2564
2565 if (data < E1000_82580_RXPBS_TABLE_SIZE)
2566 ret_val = e1000_82580_rxpbs_table[data];
2567
2568 return ret_val;
2569 }
2570
2571 /**
2572 * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
2573 * checksum
2574 * @hw: pointer to the HW structure
2575 * @offset: offset in words of the checksum protected region
2576 *
2577 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2578 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2579 **/
2580 s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2581 {
2582 s32 ret_val = E1000_SUCCESS;
2583 u16 checksum = 0;
2584 u16 i, nvm_data;
2585
2586 DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
2587
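	/* The loop below covers the checksum-protected region including the
	 * checksum word itself, so the 16-bit sum of all words should equal
	 * NVM_SUM (0xBABA).
	 */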
2588 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2589 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2590 if (ret_val) {
2591 DEBUGOUT("NVM Read Error\n");
2592 goto out;
2593 }
2594 checksum += nvm_data;
2595 }
2596
2597 if (checksum != (u16) NVM_SUM) {
2598 DEBUGOUT("NVM Checksum Invalid\n");
2599 ret_val = -E1000_ERR_NVM;
2600 goto out;
2601 }
2602
2603 out:
2604 return ret_val;
2605 }
2606
2607 /**
2608 * e1000_update_nvm_checksum_with_offset - Update EEPROM
2609 * checksum
2610 * @hw: pointer to the HW structure
2611 * @offset: offset in words of the checksum protected region
2612 *
2613 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2614 * up to the checksum. Then calculates the EEPROM checksum and writes the
2615 * value to the EEPROM.
2616 **/
2617 s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2618 {
2619 s32 ret_val;
2620 u16 checksum = 0;
2621 u16 i, nvm_data;
2622
2623 DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
2624
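	/* Sum every word up to, but not including, the checksum word; the
	 * checksum is then written as NVM_SUM minus that sum so the whole
	 * protected region again totals 0xBABA.
	 */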
2625 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2626 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2627 if (ret_val) {
2628 DEBUGOUT("NVM Read Error while updating checksum.\n");
2629 goto out;
2630 }
2631 checksum += nvm_data;
2632 }
2633 checksum = (u16) NVM_SUM - checksum;
2634 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2635 &checksum);
2636 if (ret_val)
2637 DEBUGOUT("NVM Write Error while updating checksum.\n");
2638
2639 out:
2640 return ret_val;
2641 }
2642
2643 /**
2644 * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
2645 * @hw: pointer to the HW structure
2646 *
2647 * Calculates the EEPROM section checksum by reading/adding each word of
2648 * the EEPROM and then verifies that the sum of the EEPROM is
2649 * equal to 0xBABA.
2650 **/
2651 static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
2652 {
2653 s32 ret_val;
2654 u16 eeprom_regions_count = 1;
2655 u16 j, nvm_data;
2656 u16 nvm_offset;
2657
2658 DEBUGFUNC("e1000_validate_nvm_checksum_82580");
2659
2660 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2661 if (ret_val) {
2662 DEBUGOUT("NVM Read Error\n");
2663 goto out;
2664 }
2665
2666 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2667 /* if checksums compatibility bit is set, validate checksums
2668 * for all 4 ports. */
2669 eeprom_regions_count = 4;
2670 }
2671
2672 for (j = 0; j < eeprom_regions_count; j++) {
2673 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2674 ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2675 nvm_offset);
2676 if (ret_val != E1000_SUCCESS)
2677 goto out;
2678 }
2679
2680 out:
2681 return ret_val;
2682 }
2683
2684 /**
2685 * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
2686 * @hw: pointer to the HW structure
2687 *
2688 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2689 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2690 * checksum and writes the value to the EEPROM.
2691 **/
2692 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
2693 {
2694 s32 ret_val;
2695 u16 j, nvm_data;
2696 u16 nvm_offset;
2697
2698 DEBUGFUNC("e1000_update_nvm_checksum_82580");
2699
2700 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2701 if (ret_val) {
2702 DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
2703 goto out;
2704 }
2705
2706 if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
2707 /* set compatibility bit to validate checksums appropriately */
2708 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2709 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2710 &nvm_data);
2711 if (ret_val) {
2712 DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
2713 goto out;
2714 }
2715 }
2716
2717 for (j = 0; j < 4; j++) {
2718 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2719 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2720 if (ret_val)
2721 goto out;
2722 }
2723
2724 out:
2725 return ret_val;
2726 }
2727
2728 /**
2729 * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
2730 * @hw: pointer to the HW structure
2731 *
2732 * Calculates the EEPROM section checksum by reading/adding each word of
2733 * the EEPROM and then verifies that the sum of the EEPROM is
2734 * equal to 0xBABA.
2735 **/
2736 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
2737 {
2738 s32 ret_val = E1000_SUCCESS;
2739 u16 j;
2740 u16 nvm_offset;
2741
2742 DEBUGFUNC("e1000_validate_nvm_checksum_i350");
2743
2744 for (j = 0; j < 4; j++) {
2745 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2746 ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2747 nvm_offset);
2748 if (ret_val != E1000_SUCCESS)
2749 goto out;
2750 }
2751
2752 out:
2753 return ret_val;
2754 }
2755
2756 /**
2757 * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
2758 * @hw: pointer to the HW structure
2759 *
2760 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2761 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2762 * checksum and writes the value to the EEPROM.
2763 **/
2764 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
2765 {
2766 s32 ret_val = E1000_SUCCESS;
2767 u16 j;
2768 u16 nvm_offset;
2769
2770 DEBUGFUNC("e1000_update_nvm_checksum_i350");
2771
2772 for (j = 0; j < 4; j++) {
2773 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2774 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2775 if (ret_val != E1000_SUCCESS)
2776 goto out;
2777 }
2778
2779 out:
2780 return ret_val;
2781 }
2782
2783 /**
2784 * __e1000_access_emi_reg - Read/write EMI register
2785 * @hw: pointer to the HW structure
2786 * @addr: EMI address to program
2787 * @data: pointer to value to read/write from/to the EMI address
2788 * @read: boolean flag to indicate read or write
2789 **/
2790 static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
2791 u16 *data, bool read)
2792 {
2793 s32 ret_val;
2794
2795 DEBUGFUNC("__e1000_access_emi_reg");
2796
2797 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2798 if (ret_val)
2799 return ret_val;
2800
2801 if (read)
2802 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2803 else
2804 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2805
2806 return ret_val;
2807 }
2808
2809 /**
2810 * e1000_read_emi_reg - Read Extended Management Interface register
2811 * @hw: pointer to the HW structure
2812 * @addr: EMI address to program
2813 * @data: value to be read from the EMI address
2814 **/
2815 s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2816 {
2817 DEBUGFUNC("e1000_read_emi_reg");
2818
2819 return __e1000_access_emi_reg(hw, addr, data, TRUE);
2820 }
2821
2822 /**
2823 * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
2824 * @hw: pointer to the HW structure
2825 *
2826 * Initialize Marvell 1512 to work correctly with Avoton.
2827 **/
2828 s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
2829 {
2830 struct e1000_phy_info *phy = &hw->phy;
2831 s32 ret_val = E1000_SUCCESS;
2832
2833 DEBUGFUNC("e1000_initialize_M88E1512_phy");
2834
2835 /* Check if this is correct PHY. */
2836 if (phy->id != M88E1512_E_PHY_ID)
2837 goto out;
2838
2839 /* Switch to PHY page 0xFF. */
2840 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
2841 if (ret_val)
2842 goto out;
2843
2844 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
2845 if (ret_val)
2846 goto out;
2847
2848 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
2849 if (ret_val)
2850 goto out;
2851
2852 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
2853 if (ret_val)
2854 goto out;
2855
2856 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
2857 if (ret_val)
2858 goto out;
2859
2860 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
2861 if (ret_val)
2862 goto out;
2863
2864 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
2865 if (ret_val)
2866 goto out;
2867
2868 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
2869 if (ret_val)
2870 goto out;
2871
2872 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
2873 if (ret_val)
2874 goto out;
2875
2876 /* Switch to PHY page 0xFB. */
2877 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
2878 if (ret_val)
2879 goto out;
2880
2881 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
2882 if (ret_val)
2883 goto out;
2884
2885 /* Switch to PHY page 0x12. */
2886 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
2887 if (ret_val)
2888 goto out;
2889
2890 /* Change mode to SGMII-to-Copper */
2891 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
2892 if (ret_val)
2893 goto out;
2894
2895 /* Return the PHY to page 0. */
2896 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
2897 if (ret_val)
2898 goto out;
2899
2900 ret_val = phy->ops.commit(hw);
2901 if (ret_val) {
2902 DEBUGOUT("Error committing the PHY changes\n");
2903 return ret_val;
2904 }
2905
2906 msec_delay(1000);
2907 out:
2908 return ret_val;
2909 }
2910
2911 /**
2912 * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY
2913 * @hw: pointer to the HW structure
2914 *
2915 * Initialize Marvell 1543 to work correctly with Avoton.
2916 **/
2917 s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw)
2918 {
2919 struct e1000_phy_info *phy = &hw->phy;
2920 s32 ret_val = E1000_SUCCESS;
2921
2922 DEBUGFUNC("e1000_initialize_M88E1543_phy");
2923
2924 /* Check if this is correct PHY. */
2925 if (phy->id != M88E1543_E_PHY_ID)
2926 goto out;
2927
2928 /* Switch to PHY page 0xFF. */
2929 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
2930 if (ret_val)
2931 goto out;
2932
2933 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
2934 if (ret_val)
2935 goto out;
2936
2937 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
2938 if (ret_val)
2939 goto out;
2940
2941 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
2942 if (ret_val)
2943 goto out;
2944
2945 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
2946 if (ret_val)
2947 goto out;
2948
2949 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
2950 if (ret_val)
2951 goto out;
2952
2953 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
2954 if (ret_val)
2955 goto out;
2956
2957 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C);
2958 if (ret_val)
2959 goto out;
2960
2961 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
2962 if (ret_val)
2963 goto out;
2964
2965 /* Switch to PHY page 0xFB. */
2966 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
2967 if (ret_val)
2968 goto out;
2969
2970 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D);
2971 if (ret_val)
2972 goto out;
2973
2974 /* Switch to PHY page 0x12. */
2975 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
2976 if (ret_val)
2977 goto out;
2978
2979 /* Change mode to SGMII-to-Copper */
2980 ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
2981 if (ret_val)
2982 goto out;
2983
2984 /* Switch to PHY page 1. */
2985 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1);
2986 if (ret_val)
2987 goto out;
2988
2989 /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */
2990 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140);
2991 if (ret_val)
2992 goto out;
2993
2994 /* Return the PHY to page 0. */
2995 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
2996 if (ret_val)
2997 goto out;
2998
2999 ret_val = phy->ops.commit(hw);
3000 if (ret_val) {
3001 DEBUGOUT("Error committing the PHY changes\n");
3002 return ret_val;
3003 }
3004
3005 msec_delay(1000);
3006 out:
3007 return ret_val;
3008 }
3009
3010 /**
3011 * e1000_set_eee_i350 - Enable/disable EEE support
3012 * @hw: pointer to the HW structure
3013 * @adv1G: boolean flag enabling 1G EEE advertisement
3014 * @adv100M: boolean flag enabling 100M EEE advertisement
3015 *
3016 * Enable/disable EEE based on setting in dev_spec structure.
3017 *
3018 **/
3019 s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
3020 {
3021 u32 ipcnfg, eeer;
3022
3023 DEBUGFUNC("e1000_set_eee_i350");
3024
3025 if ((hw->mac.type < e1000_i350) ||
3026 (hw->phy.media_type != e1000_media_type_copper))
3027 goto out;
3028 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
3029 eeer = E1000_READ_REG(hw, E1000_EEER);
3030
3031 /* enable or disable per user setting */
3032 if (!(hw->dev_spec._82575.eee_disable)) {
3033 u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
3034
3035 if (adv100M)
3036 ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
3037 else
3038 ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
3039
3040 if (adv1G)
3041 ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
3042 else
3043 ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
3044
3045 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
3046 E1000_EEER_LPI_FC);
3047
3048 /* This bit should not be set in normal operation. */
3049 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
3050 DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
3051 } else {
3052 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
3053 eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
3054 E1000_EEER_LPI_FC);
3055 }
3056 E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
3057 E1000_WRITE_REG(hw, E1000_EEER, eeer);
3058 E1000_READ_REG(hw, E1000_IPCNFG);
3059 E1000_READ_REG(hw, E1000_EEER);
3060 out:
3061
3062 return E1000_SUCCESS;
3063 }
3064
3065 /**
3066 * e1000_set_eee_i354 - Enable/disable EEE support
3067 * @hw: pointer to the HW structure
3068 * @adv1G: boolean flag enabling 1G EEE advertisement
3069 * @adv100M: boolean flag enabling 100M EEE advertisement
3070 *
3071 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
3072 *
3073 **/
3074 s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
3075 {
3076 struct e1000_phy_info *phy = &hw->phy;
3077 s32 ret_val = E1000_SUCCESS;
3078 u16 phy_data;
3079
3080 DEBUGFUNC("e1000_set_eee_i354");
3081
3082 if ((hw->phy.media_type != e1000_media_type_copper) ||
3083 ((phy->id != M88E1543_E_PHY_ID) &&
3084 (phy->id != M88E1512_E_PHY_ID)))
3085 goto out;
3086
3087 if (!hw->dev_spec._82575.eee_disable) {
3088 /* Switch to PHY page 18. */
3089 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
3090 if (ret_val)
3091 goto out;
3092
3093 ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
3094 &phy_data);
3095 if (ret_val)
3096 goto out;
3097
3098 phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
3099 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
3100 phy_data);
3101 if (ret_val)
3102 goto out;
3103
3104 /* Return the PHY to page 0. */
3105 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
3106 if (ret_val)
3107 goto out;
3108
3109 /* Turn on EEE advertisement. */
3110 ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
3111 E1000_EEE_ADV_DEV_I354,
3112 &phy_data);
3113 if (ret_val)
3114 goto out;
3115
3116 if (adv100M)
3117 phy_data |= E1000_EEE_ADV_100_SUPPORTED;
3118 else
3119 phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
3120
3121 if (adv1G)
3122 phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
3123 else
3124 phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
3125
3126 ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
3127 E1000_EEE_ADV_DEV_I354,
3128 phy_data);
3129 } else {
3130 /* Turn off EEE advertisement. */
3131 ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
3132 E1000_EEE_ADV_DEV_I354,
3133 &phy_data);
3134 if (ret_val)
3135 goto out;
3136
3137 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
3138 E1000_EEE_ADV_1000_SUPPORTED);
3139 ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
3140 E1000_EEE_ADV_DEV_I354,
3141 phy_data);
3142 }
3143
3144 out:
3145 return ret_val;
3146 }
3147
3148 /**
3149 * e1000_get_eee_status_i354 - Get EEE status
3150 * @hw: pointer to the HW structure
3151 * @status: EEE status
3152 *
3153 * Get EEE status by guessing based on whether Tx or Rx LPI indications have
3154 * been received.
3155 **/
3156 s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
3157 {
3158 struct e1000_phy_info *phy = &hw->phy;
3159 s32 ret_val = E1000_SUCCESS;
3160 u16 phy_data;
3161
3162 DEBUGFUNC("e1000_get_eee_status_i354");
3163
3164 /* Check if EEE is supported on this device. */
3165 if ((hw->phy.media_type != e1000_media_type_copper) ||
3166 ((phy->id != M88E1543_E_PHY_ID) &&
3167 (phy->id != M88E1512_E_PHY_ID)))
3168 goto out;
3169
3170 ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
3171 E1000_PCS_STATUS_DEV_I354,
3172 &phy_data);
3173 if (ret_val)
3174 goto out;
3175
3176 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
3177 E1000_PCS_STATUS_RX_LPI_RCVD) ? TRUE : FALSE;
3178
3179 out:
3180 return ret_val;
3181 }
3182
3183 /* Due to a hw erratum, if the host tries to configure the VFTA register
3184 * while performing queries from the BMC or DMA, then the VFTA in some
3185 * cases won't be written.
3186 */
3187
3188 /**
3189 * e1000_clear_vfta_i350 - Clear VLAN filter table
3190 * @hw: pointer to the HW structure
3191 *
3192 * Clears the register array which contains the VLAN filter table by
3193 * setting all the values to 0.
3194 **/
3195 void e1000_clear_vfta_i350(struct e1000_hw *hw)
3196 {
3197 u32 offset;
3198 int i;
3199
3200 DEBUGFUNC("e1000_clear_vfta_350");
3201
3202 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
3203 for (i = 0; i < 10; i++)
3204 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
3205
3206 E1000_WRITE_FLUSH(hw);
3207 }
3208 }
3209
3210 /**
3211 * e1000_write_vfta_i350 - Write value to VLAN filter table
3212 * @hw: pointer to the HW structure
3213 * @offset: register offset in VLAN filter table
3214 * @value: register value written to VLAN filter table
3215 *
3216 * Writes value at the given offset in the register array which stores
3217 * the VLAN filter table.
3218 **/
3219 void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
3220 {
3221 int i;
3222
3223 DEBUGFUNC("e1000_write_vfta_350");
3224
3225 for (i = 0; i < 10; i++)
3226 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
3227
3228 E1000_WRITE_FLUSH(hw);
3229 }
3230
3231
3232 /**
3233 * e1000_set_i2c_bb - Enable I2C bit-bang
3234 * @hw: pointer to the HW structure
3235 *
3236 * Enable I2C bit-bang interface
3237 *
3238 **/
3239 s32 e1000_set_i2c_bb(struct e1000_hw *hw)
3240 {
3241 s32 ret_val = E1000_SUCCESS;
3242 u32 ctrl_ext, i2cparams;
3243
3244 DEBUGFUNC("e1000_set_i2c_bb");
3245
3246 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3247 ctrl_ext |= E1000_CTRL_I2C_ENA;
3248 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3249 E1000_WRITE_FLUSH(hw);
3250
3251 i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
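	/* Enable bit-bang control and set both output-enable bits (active
	 * low), which releases SDA and SCL to their pulled-up idle state.
	 */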
3252 i2cparams |= E1000_I2CBB_EN;
3253 i2cparams |= E1000_I2C_DATA_OE_N;
3254 i2cparams |= E1000_I2C_CLK_OE_N;
3255 E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
3256 E1000_WRITE_FLUSH(hw);
3257
3258 return ret_val;
3259 }
3260
3261 /**
3262 * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
3263 * @hw: pointer to hardware structure
3264 * @byte_offset: byte offset to read
3265 * @dev_addr: device address
3266 * @data: value read
3267 *
3268 * Performs byte read operation over I2C interface at
3269 * a specified device address.
3270 **/
3271 s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
3272 u8 dev_addr, u8 *data)
3273 {
3274 s32 status = E1000_SUCCESS;
3275 u32 max_retry = 10;
3276 u32 retry = 1;
3277 u16 swfw_mask = 0;
3278
3279 bool nack = TRUE;
3280
3281 DEBUGFUNC("e1000_read_i2c_byte_generic");
3282
3283 swfw_mask = E1000_SWFW_PHY0_SM;
3284
3285 do {
3286 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
3287 != E1000_SUCCESS) {
3288 status = E1000_ERR_SWFW_SYNC;
3289 goto read_byte_out;
3290 }
3291
3292 e1000_i2c_start(hw);
3293
3294 /* Device Address and write indication */
3295 status = e1000_clock_out_i2c_byte(hw, dev_addr);
3296 if (status != E1000_SUCCESS)
3297 goto fail;
3298
3299 status = e1000_get_i2c_ack(hw);
3300 if (status != E1000_SUCCESS)
3301 goto fail;
3302
3303 status = e1000_clock_out_i2c_byte(hw, byte_offset);
3304 if (status != E1000_SUCCESS)
3305 goto fail;
3306
3307 status = e1000_get_i2c_ack(hw);
3308 if (status != E1000_SUCCESS)
3309 goto fail;
3310
3311 e1000_i2c_start(hw);
3312
3313 /* Device Address and read indication */
3314 status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
3315 if (status != E1000_SUCCESS)
3316 goto fail;
3317
3318 status = e1000_get_i2c_ack(hw);
3319 if (status != E1000_SUCCESS)
3320 goto fail;
3321
3322 status = e1000_clock_in_i2c_byte(hw, data);
3323 if (status != E1000_SUCCESS)
3324 goto fail;
3325
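		/* NACK the single data byte so the target releases SDA before
		 * the stop condition.
		 */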
3326 status = e1000_clock_out_i2c_bit(hw, nack);
3327 if (status != E1000_SUCCESS)
3328 goto fail;
3329
3330 e1000_i2c_stop(hw);
3331 break;
3332
3333 fail:
3334 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
3335 msec_delay(100);
3336 e1000_i2c_bus_clear(hw);
3337 retry++;
3338 if (retry < max_retry)
3339 DEBUGOUT("I2C byte read error - Retrying.\n");
3340 else
3341 DEBUGOUT("I2C byte read error.\n");
3342
3343 } while (retry < max_retry);
3344
3345 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
3346
3347 read_byte_out:
3348
3349 return status;
3350 }
3351
3352 /**
3353 * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C
3354 * @hw: pointer to hardware structure
3355 * @byte_offset: byte offset to write
3356 * @dev_addr: device address
3357 * @data: value to write
3358 *
3359 * Performs byte write operation over I2C interface at
3360 * a specified device address.
3361 **/
3362 s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
3363 u8 dev_addr, u8 data)
3364 {
3365 s32 status = E1000_SUCCESS;
3366 u32 max_retry = 1;
3367 u32 retry = 0;
3368 u16 swfw_mask = 0;
3369
3370 DEBUGFUNC("e1000_write_i2c_byte_generic");
3371
3372 swfw_mask = E1000_SWFW_PHY0_SM;
3373
3374 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
3375 status = E1000_ERR_SWFW_SYNC;
3376 goto write_byte_out;
3377 }
3378
3379 do {
3380 e1000_i2c_start(hw);
3381
3382 status = e1000_clock_out_i2c_byte(hw, dev_addr);
3383 if (status != E1000_SUCCESS)
3384 goto fail;
3385
3386 status = e1000_get_i2c_ack(hw);
3387 if (status != E1000_SUCCESS)
3388 goto fail;
3389
3390 status = e1000_clock_out_i2c_byte(hw, byte_offset);
3391 if (status != E1000_SUCCESS)
3392 goto fail;
3393
3394 status = e1000_get_i2c_ack(hw);
3395 if (status != E1000_SUCCESS)
3396 goto fail;
3397
3398 status = e1000_clock_out_i2c_byte(hw, data);
3399 if (status != E1000_SUCCESS)
3400 goto fail;
3401
3402 status = e1000_get_i2c_ack(hw);
3403 if (status != E1000_SUCCESS)
3404 goto fail;
3405
3406 e1000_i2c_stop(hw);
3407 break;
3408
3409 fail:
3410 e1000_i2c_bus_clear(hw);
3411 retry++;
3412 if (retry < max_retry)
3413 DEBUGOUT("I2C byte write error - Retrying.\n");
3414 else
3415 DEBUGOUT("I2C byte write error.\n");
3416 } while (retry < max_retry);
3417
3418 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
3419
3420 write_byte_out:
3421
3422 return status;
3423 }
3424
3425 /**
3426 * e1000_i2c_start - Sets I2C start condition
3427 * @hw: pointer to hardware structure
3428 *
3429 * Sets I2C start condition (High -> Low on SDA while SCL is High)
3430 **/
3431 static void e1000_i2c_start(struct e1000_hw *hw)
3432 {
3433 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
3434
3435 DEBUGFUNC("e1000_i2c_start");
3436
3437 /* Start condition must begin with data and clock high */
3438 e1000_set_i2c_data(hw, &i2cctl, 1);
3439 e1000_raise_i2c_clk(hw, &i2cctl);
3440
3441 /* Setup time for start condition (4.7us) */
3442 usec_delay(E1000_I2C_T_SU_STA);
3443
3444 e1000_set_i2c_data(hw, &i2cctl, 0);
3445
3446 /* Hold time for start condition (4us) */
3447 usec_delay(E1000_I2C_T_HD_STA);
3448
3449 e1000_lower_i2c_clk(hw, &i2cctl);
3450
3451 /* Minimum low period of clock is 4.7 us */
3452 usec_delay(E1000_I2C_T_LOW);
3453
3454 }
3455
3456 /**
3457 * e1000_i2c_stop - Sets I2C stop condition
3458 * @hw: pointer to hardware structure
3459 *
3460 * Sets I2C stop condition (Low -> High on SDA while SCL is High)
3461 **/
3462 static void e1000_i2c_stop(struct e1000_hw *hw)
3463 {
3464 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
3465
3466 DEBUGFUNC("e1000_i2c_stop");
3467
3468 /* Stop condition must begin with data low and clock high */
3469 e1000_set_i2c_data(hw, &i2cctl, 0);
3470 e1000_raise_i2c_clk(hw, &i2cctl);
3471
3472 /* Setup time for stop condition (4us) */
3473 usec_delay(E1000_I2C_T_SU_STO);
3474
3475 e1000_set_i2c_data(hw, &i2cctl, 1);
3476
3477 /* bus free time between stop and start (4.7us)*/
3478 usec_delay(E1000_I2C_T_BUF);
3479 }
3480
3481 /**
3482 * e1000_clock_in_i2c_byte - Clocks in one byte via I2C
3483 * @hw: pointer to hardware structure
3484 * @data: data byte to clock in
3485 *
3486 * Clocks in one byte data via I2C data/clock
3487 **/
3488 static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
3489 {
3490 s32 i;
3491 bool bit = 0;
3492
3493 DEBUGFUNC("e1000_clock_in_i2c_byte");
3494
3495 *data = 0;
3496 for (i = 7; i >= 0; i--) {
3497 e1000_clock_in_i2c_bit(hw, &bit);
3498 *data |= bit << i;
3499 }
3500
3501 return E1000_SUCCESS;
3502 }
3503
3504 /**
3505 * e1000_clock_out_i2c_byte - Clocks out one byte via I2C
3506 * @hw: pointer to hardware structure
3507 * @data: data byte clocked out
3508 *
3509 * Clocks out one byte data via I2C data/clock
3510 **/
3511 static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
3512 {
3513 s32 status = E1000_SUCCESS;
3514 s32 i;
3515 u32 i2cctl;
3516 bool bit = 0;
3517
3518 DEBUGFUNC("e1000_clock_out_i2c_byte");
3519
3520 for (i = 7; i >= 0; i--) {
3521 bit = (data >> i) & 0x1;
3522 status = e1000_clock_out_i2c_bit(hw, bit);
3523
3524 if (status != E1000_SUCCESS)
3525 break;
3526 }
3527
3528 /* Release SDA line (set high) */
3529 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
3530
3531 i2cctl |= E1000_I2C_DATA_OE_N;
3532 E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
3533 E1000_WRITE_FLUSH(hw);
3534
3535 return status;
3536 }
3537
3538 /**
3539 * e1000_get_i2c_ack - Polls for I2C ACK
3540 * @hw: pointer to hardware structure
3541 *
3542 * Clocks in the acknowledge bit via I2C data/clock
3543 **/
3544 static s32 e1000_get_i2c_ack(struct e1000_hw *hw)
3545 {
3546 s32 status = E1000_SUCCESS;
3547 u32 i = 0;
3548 u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
3549 u32 timeout = 10;
3550 bool ack = TRUE;
3551
3552 DEBUGFUNC("e1000_get_i2c_ack");
3553
3554 e1000_raise_i2c_clk(hw, &i2cctl);
3555
3556 /* Minimum high period of clock is 4us */
3557 usec_delay(E1000_I2C_T_HIGH);
3558
3559 /* Wait until SCL returns high */
3560 for (i = 0; i < timeout; i++) {
3561 usec_delay(1);
3562 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
3563 if (i2cctl & E1000_I2C_CLK_IN)
3564 break;
3565 }
3566 if (!(i2cctl & E1000_I2C_CLK_IN))
3567 return E1000_ERR_I2C;
3568
3569 ack = e1000_get_i2c_data(&i2cctl);
3570 if (ack) {
3571 DEBUGOUT("I2C ack was not received.\n");
3572 status = E1000_ERR_I2C;
3573 }
3574
3575 e1000_lower_i2c_clk(hw, &i2cctl);
3576
3577 /* Minimum low period of clock is 4.7 us */
3578 usec_delay(E1000_I2C_T_LOW);
3579
3580 return status;
3581 }
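
/*
 * Illustrative sketch only (not part of the driver): every byte clocked out
 * on the bus is normally followed by an ACK check, so a caller would pair
 * e1000_clock_out_i2c_byte() with e1000_get_i2c_ack().  The wrapper name
 * below is hypothetical and the block is disabled.
 */
#if 0
static s32 example_i2c_write_byte_acked(struct e1000_hw *hw, u8 byte)
{
	s32 status;

	status = e1000_clock_out_i2c_byte(hw, byte);
	if (status != E1000_SUCCESS)
		return status;

	/* The slave pulls SDA low during the ninth clock to acknowledge */
	return e1000_get_i2c_ack(hw);
}
#endif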

/**
 * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: read data value
 *
 * Clocks in one bit via I2C data/clock
 **/
static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
{
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

	DEBUGFUNC("e1000_clock_in_i2c_bit");

	e1000_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock is 4us */
	usec_delay(E1000_I2C_T_HIGH);

	i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
	*data = e1000_get_i2c_data(&i2cctl);

	e1000_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	usec_delay(E1000_I2C_T_LOW);

	return E1000_SUCCESS;
}

/**
 * e1000_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: data value to write
 *
 * Clocks out one bit via I2C data/clock
 **/
static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
{
	s32 status;
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

	DEBUGFUNC("e1000_clock_out_i2c_bit");

	status = e1000_set_i2c_data(hw, &i2cctl, data);
	if (status == E1000_SUCCESS) {
		e1000_raise_i2c_clk(hw, &i2cctl);

		/* Minimum high period of clock is 4us */
		usec_delay(E1000_I2C_T_HIGH);

		e1000_lower_i2c_clk(hw, &i2cctl);

		/* Minimum low period of clock is 4.7 us.
		 * This also takes care of the data hold time.
		 */
		usec_delay(E1000_I2C_T_LOW);
	} else {
		status = E1000_ERR_I2C;
		DEBUGOUT1("I2C data was not set to %X\n", data);
	}

	return status;
}

/**
 * e1000_raise_i2c_clk - Raises the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Raises the I2C clock line '0'->'1'
 **/
static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
{
	DEBUGFUNC("e1000_raise_i2c_clk");

	*i2cctl |= E1000_I2C_CLK_OUT;
	*i2cctl &= ~E1000_I2C_CLK_OE_N;
	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
	E1000_WRITE_FLUSH(hw);

	/* SCL rise time (1000ns) */
	usec_delay(E1000_I2C_T_RISE);
}

/**
 * e1000_lower_i2c_clk - Lowers the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Lowers the I2C clock line '1'->'0'
 **/
static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
{
	DEBUGFUNC("e1000_lower_i2c_clk");

	*i2cctl &= ~E1000_I2C_CLK_OUT;
	*i2cctl &= ~E1000_I2C_CLK_OE_N;
	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
	E1000_WRITE_FLUSH(hw);

	/* SCL fall time (300ns) */
	usec_delay(E1000_I2C_T_FALL);
}

/**
 * e1000_set_i2c_data - Sets the I2C data bit
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 **/
static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
{
	s32 status = E1000_SUCCESS;

	DEBUGFUNC("e1000_set_i2c_data");

	if (data)
		*i2cctl |= E1000_I2C_DATA_OUT;
	else
		*i2cctl &= ~E1000_I2C_DATA_OUT;

	*i2cctl &= ~E1000_I2C_DATA_OE_N;
	*i2cctl |= E1000_I2C_CLK_OE_N;
	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
	E1000_WRITE_FLUSH(hw);

	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
	usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);

	*i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
	if (data != e1000_get_i2c_data(i2cctl)) {
		status = E1000_ERR_I2C;
		DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
	}

	return status;
}

/**
 * e1000_get_i2c_data - Reads the I2C SDA data bit
 * @i2cctl: Current value of I2CCTL register
 *
 * Returns the I2C data bit value
 **/
static bool e1000_get_i2c_data(u32 *i2cctl)
{
	bool data;

	DEBUGFUNC("e1000_get_i2c_data");

	if (*i2cctl & E1000_I2C_DATA_IN)
		data = 1;
	else
		data = 0;

	return data;
}
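
/*
 * Note on the pin helpers above (an interpretation, not taken from the
 * datasheet): the E1000_I2C_CLK_OE_N and E1000_I2C_DATA_OE_N bits in
 * I2CPARAMS appear to be active-low output enables.  Clearing an OE_N bit
 * drives the pin from the matching *_OUT bit, while setting it tri-states
 * the pin so the bus pull-up (or the slave device) controls the line.
 * That is consistent with how the open-drain I2C bus is emulated here,
 * and with e1000_clock_out_i2c_byte() "releasing" SDA by setting
 * E1000_I2C_DATA_OE_N before the slave's ACK is sampled.
 */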

/**
 * e1000_i2c_bus_clear - Clears the I2C bus
 * @hw: pointer to hardware structure
 *
 * Clears the I2C bus by sending nine clock pulses.
 * Used when data line is stuck low.
 **/
void e1000_i2c_bus_clear(struct e1000_hw *hw)
{
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
	u32 i;

	DEBUGFUNC("e1000_i2c_bus_clear");

	e1000_i2c_start(hw);

	e1000_set_i2c_data(hw, &i2cctl, 1);

	for (i = 0; i < 9; i++) {
		e1000_raise_i2c_clk(hw, &i2cctl);

		/* Min high period of clock is 4us */
		usec_delay(E1000_I2C_T_HIGH);

		e1000_lower_i2c_clk(hw, &i2cctl);

		/* Min low period of clock is 4.7us */
		usec_delay(E1000_I2C_T_LOW);
	}

	e1000_i2c_start(hw);

	/* Put the i2c bus back to default state */
	e1000_i2c_stop(hw);
}
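
/*
 * Illustrative sketch only (not part of the driver): how the bit-bang
 * primitives in this file could be combined into a register read using a
 * repeated start to switch from write to read, and a NACK (data bit 1) to
 * tell the slave that no further bytes are wanted.  The function name, the
 * 7-bit address handling and the failure path are hypothetical; the block
 * is disabled.
 */
#if 0
static s32 example_i2c_read_byte(struct e1000_hw *hw, u8 dev_addr,
				 u8 offset, u8 *data)
{
	s32 status;

	e1000_i2c_start(hw);

	/* Address the device for a write and send the register offset */
	status = e1000_clock_out_i2c_byte(hw, (u8)(dev_addr << 1));
	if (status != E1000_SUCCESS || e1000_get_i2c_ack(hw) != E1000_SUCCESS)
		goto fail;
	status = e1000_clock_out_i2c_byte(hw, offset);
	if (status != E1000_SUCCESS || e1000_get_i2c_ack(hw) != E1000_SUCCESS)
		goto fail;

	/* Repeated start, then address the device for a read */
	e1000_i2c_start(hw);
	status = e1000_clock_out_i2c_byte(hw, (u8)((dev_addr << 1) | 0x1));
	if (status != E1000_SUCCESS || e1000_get_i2c_ack(hw) != E1000_SUCCESS)
		goto fail;

	/* Clock in the data byte and answer with a NACK */
	e1000_clock_in_i2c_byte(hw, data);
	e1000_clock_out_i2c_bit(hw, 1);

	e1000_i2c_stop(hw);
	return E1000_SUCCESS;

fail:
	/* Recover the bus in case a slave is left driving SDA low */
	e1000_i2c_bus_clear(hw);
	return E1000_ERR_I2C;
}
#endif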