1 /******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
34
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40
41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
42 ixgbe_link_speed speed,
43 bool autoneg,
44 bool autoneg_wait_to_complete);
45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
47 u16 offset, u16 *data);
48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 u16 words, u16 *data);
50
/**
 * ixgbe_init_mac_link_ops_82599 - Init link-specific MAC function pointers
 * @hw: pointer to hardware structure
 *
 * Selects the setup_link and Tx-laser control implementations based on the
 * detected media type and PHY capabilities.  Safe to call again after SFP
 * identification changes the media type.
 **/
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/* enable the laser control functions for SFP+ fiber */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
		mac->ops.disable_tx_laser =
			&ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
			&ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		/* Non-fiber media has no Tx laser to control */
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
	} else {
		/*
		 * SmartSpeed is only used on backplane when enabled (or in
		 * auto mode) and when the LESM firmware feature is not active.
		 */
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
		}
	}
}
85
/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 * Returns IXGBE_SUCCESS, or the error from phy.ops.identify (in particular
 * IXGBE_ERR_SFP_NOT_SUPPORTED, which aborts the remaining init).
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* Known SFP modules don't need a PHY reset */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
			&ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on phy type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHY gets its own link and firmware-version handlers */
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			&ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
134
/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * For a known SFP type, plays the EEPROM-provided init sequence into the
 * CORECTL register (under the MAC_CSR software/firmware semaphore), then
 * restarts the DSP/autoneg and waits for the AN state machine to leave
 * state 0 before switching the link mode to 10G serial (SFI).
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC if the semaphore could not be
 * taken, or IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if AN never left state 0.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg_anlp1 = 0;
	u32 i = 0;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		/* Init sequence replaces the need for a PHY reset */
		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Stream EEPROM words into CORECTL until the 0xffff sentinel */
		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Now restart DSP by setting Restart_AN and clearing LMS */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
				IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
				IXGBE_AUTOC_AN_RESTART));

		/* Wait for AN to leave state 0 (up to 10 * 4ms) */
		for (i = 0; i < 10; i++) {
			msec_delay(4);
			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
				break;
		}
		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

		/* Restart DSP by setting Restart_AN and return to SFI mode */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
				IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
				IXGBE_AUTOC_AN_RESTART));
	}

setup_sfp_out:
	return ret_val;
}
201
/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware (beyond reading FWSM to detect manageability).
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/*
	 * NOTE(review): the status of ixgbe_init_phy_ops_generic() is
	 * overwritten on the next line; only the ixgbe_init_ops_generic()
	 * result is propagated to the caller — confirm this matches intent.
	 */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware limits */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* A non-zero FWSM mode indicates manageability firmware is present */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = &ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;


	return ret_val;
}
287
/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @negotiation: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 * Returns IXGBE_ERR_LINK_SETUP for an unrecognized AUTOC link mode.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *negotiation)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");


	/* Check if 1G SFP module.  These are limited to 1G regardless
	 * of what AUTOC reports. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		/* Accumulate speeds from the individually advertised
		 * KR/KX4 (both 10G) and KX (1G) support bits. */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*negotiation = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Multispeed fiber can always try both 10G and 1G */
	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
	}

out:
	return status;
}
391
392 /**
393 * ixgbe_get_media_type_82599 - Get media type
394 * @hw: pointer to hardware structure
395 *
396 * Returns the media type (fiber, copper, backplane)
397 **/
ixgbe_get_media_type_82599(struct ixgbe_hw * hw)398 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
399 {
400 enum ixgbe_media_type media_type;
401
402 DEBUGFUNC("ixgbe_get_media_type_82599");
403
404 /* Detect if there is a copper PHY attached. */
405 switch (hw->phy.type) {
406 case ixgbe_phy_cu_unknown:
407 case ixgbe_phy_tn:
408 media_type = ixgbe_media_type_copper;
409 goto out;
410 default:
411 break;
412 }
413
414 switch (hw->device_id) {
415 case IXGBE_DEV_ID_82599_KX4:
416 case IXGBE_DEV_ID_82599_KX4_MEZZ:
417 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
418 case IXGBE_DEV_ID_82599_KR:
419 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
420 case IXGBE_DEV_ID_82599_XAUI_LOM:
421 /* Default device ID is mezzanine card KX/KX4 */
422 media_type = ixgbe_media_type_backplane;
423 break;
424 case IXGBE_DEV_ID_82599_SFP:
425 case IXGBE_DEV_ID_82599_SFP_FCOE:
426 case IXGBE_DEV_ID_82599_SFP_EM:
427 case IXGBE_DEV_ID_82599_SFP_SF2:
428 case IXGBE_DEV_ID_82599EN_SFP:
429 media_type = ixgbe_media_type_fiber;
430 break;
431 case IXGBE_DEV_ID_82599_CX4:
432 media_type = ixgbe_media_type_cx4;
433 break;
434 case IXGBE_DEV_ID_82599_T3_LOM:
435 media_type = ixgbe_media_type_copper;
436 break;
437 default:
438 media_type = ixgbe_media_type_unknown;
439 break;
440 }
441 out:
442 return media_type;
443 }
444
/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_AUTONEG_NOT_COMPLETE when waiting
 * was requested for a KX/KX4/KR mode and AN did not finish in time.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so,
	 * and only for the link modes that actually autonegotiate */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return status;
}
496
/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Disable tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}
515
/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}
534
/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to TRUE to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed then disable and re-enable the tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end.  This is consistent with TRUE clause 37 autoneg, which also
 * involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Only flap once per requested restart */
	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}
557
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 * Tries 10G first, then 1G, toggling the module rate-select pins (SDP5)
 * between attempts.  If neither speed links, reconfigures back to the
 * highest speed attempted via a single recursive call.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed, bool autoneg,
					  bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = FALSE;
	bool negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed via the SDP5 rate-select pin */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed: drive SDP5 low for 1G */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
			highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
708
/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm: try full advertisement,
 * then retry with KR disabled (smart_speed_active), and finally fall
 * back to the full advertisement again if neither attempt links.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed, bool autoneg,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
821
/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 * Only writes AUTOC (and restarts AN) if the computed value differs
 * from the current register contents.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed, bool autoneg,
			       bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 start_autoc = autoc;
	u32 orig_autoc = 0;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status != IXGBE_SUCCESS)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR is suppressed while SmartSpeed is active */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != start_autoc) {
		/* Restart link */
		autoc |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
936
937 /**
938 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
939 * @hw: pointer to hardware structure
940 * @speed: new link speed
941 * @autoneg: TRUE if autonegotiation enabled
942 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
943 *
944 * Restarts link on PHY and MAC based on settings passed in.
945 **/
ixgbe_setup_copper_link_82599(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg,bool autoneg_wait_to_complete)946 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
947 ixgbe_link_speed speed,
948 bool autoneg,
949 bool autoneg_wait_to_complete)
950 {
951 s32 status;
952
953 DEBUGFUNC("ixgbe_setup_copper_link_82599");
954
955 /* Setup the PHY according to input speed */
956 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
957 autoneg_wait_to_complete);
958 if (status == IXGBE_SUCCESS) {
959 /* Set up MAC */
960 status =
961 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
962 }
963
964 return status;
965 }
966
967 /**
968 * ixgbe_reset_hw_82599 - Perform hardware reset
969 * @hw: pointer to hardware structure
970 *
971 * Resets the hardware by resetting the transmit and receive units, masks
972 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
973 * reset.
974 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	/*
	 * An unsupported SFP module is fatal; any other init status is
	 * carried forward and may be overwritten by the SFP setup below.
	 */
	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* only run the SFP setup once per module insertion */
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it. If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* bit still set after the poll window -> reset never completed */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet. Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		/* restart autoneg so the restored link mode takes effect */
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
					IXGBE_AUTOC_AN_RESTART));

		/* only the upper bits of AUTOC2 are restored */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
1114
1115 /**
1116 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1117 * @hw: pointer to hardware structure
1118 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	/* clear INIT_DONE so it can be polled again after the re-write */
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		      IXGBE_FDIRCMD_CMD_MASK))
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
			 "aborting table re-initialization.\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* zero the free-list register so the tables start out empty */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	/* re-write FDIRCTRL with its pre-reinit value (INIT_DONE cleared) */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}
1191
1192 /**
1193 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1194 * @hw: pointer to hardware structure
1195 * @fdirctrl: value to write to flow director control register
1196 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int poll;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register. Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (poll = 0; poll < IXGBE_FDIR_INIT_DONE_POLL; poll++) {
		u32 ctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);

		if (ctrl & IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (poll >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}
1232
1233 /**
1234 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1235 * @hw: pointer to hardware structure
1236 * @fdirctrl: value to write to flow director control register, initially
1237 * contains just the value of the Rx packet buffer allocation
1238 **/
ixgbe_init_fdir_signature_82599(struct ixgbe_hw * hw,u32 fdirctrl)1239 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1240 {
1241 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1242
1243 /*
1244 * Continue setup of fdirctrl register bits:
1245 * Move the flexible bytes to use the ethertype - shift 6 words
1246 * Set the maximum length per hash bucket to 0xA filters
1247 * Send interrupt when 64 filters are left
1248 */
1249 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1250 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1251 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1252
1253 /* write hashes and fdirctrl register, poll for completion */
1254 ixgbe_fdir_enable_82599(hw, fdirctrl);
1255
1256 return IXGBE_SUCCESS;
1257 }
1258
1259 /**
1260 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1261 * @hw: pointer to hardware structure
1262 * @fdirctrl: value to write to flow director control register, initially
1263 * contains just the value of the Rx packet buffer allocation
1264 **/
ixgbe_init_fdir_perfect_82599(struct ixgbe_hw * hw,u32 fdirctrl)1265 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1266 {
1267 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1268
1269 /*
1270 * Continue setup of fdirctrl register bits:
1271 * Turn perfect match filtering on
1272 * Report hash in RSS field of Rx wb descriptor
1273 * Initialize the drop queue
1274 * Move the flexible bytes to use the ethertype - shift 6 words
1275 * Set the maximum length per hash bucket to 0xA filters
1276 * Send interrupt when 64 (0x4 * 16) filters are left
1277 */
1278 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1279 IXGBE_FDIRCTRL_REPORT_STATUS |
1280 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1281 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1282 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1283 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1284
1285 /* write hashes and fdirctrl register, poll for completion */
1286 ixgbe_fdir_enable_82599(hw, fdirctrl);
1287
1288 return IXGBE_SUCCESS;
1289 }
1290
1291 /*
1292 * These defines allow us to quickly generate all of the necessary instructions
1293 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1294 * for values 0 through 15
1295 */
/* Key bits set in BOTH keys feed the shared common_hash accumulator */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#ifdef lint
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n)
#else
/*
 * Fold key bit _n (against lo_hash_dword) and bit _n+16 (against
 * hi_hash_dword) into the common/bucket/signature hash accumulators.
 * The do { } while (0) wrapper intentionally has NO trailing semicolon:
 * call sites supply their own, which keeps the macro a single statement
 * and safe inside unbraced if/else bodies.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
#endif
1318
1319 /**
1320 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1321 * @stream: input bitstream to compute the hash on
1322 *
1323 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
1325 * compiler work out all of the conditional ifs since the keys are static
1326 * defines, and computing two keys at once since the hashed dword stream
1327 * will be the same for both keys.
1328 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	/*
	 * NOTE: IXGBE_COMPUTE_SIG_HASH_ITERATION reads and writes these
	 * locals by name; do not rename them.
	 */
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/*
	 * Process remaining 30 bits of the key.  Unrolled deliberately so
	 * each (0x01 << n) key test is a compile-time constant.
	 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	/* signature hash occupies the upper 16 bits of the result */
	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
1384
1385 /**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1387 * @hw: pointer to hardware structure
1388 * @input: unique input dword
1389 * @common: compressed common input dword
1390 * @queue: queue index to direct traffic to
1391 **/
ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_hash_dword input,union ixgbe_atr_hash_dword common,u8 queue)1392 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1393 union ixgbe_atr_hash_dword input,
1394 union ixgbe_atr_hash_dword common,
1395 u8 queue)
1396 {
1397 u64 fdirhashcmd;
1398 u32 fdircmd;
1399
1400 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1401
1402 /*
1403 * Get the flow_type in order to program FDIRCMD properly
1404 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1405 */
1406 switch (input.formatted.flow_type) {
1407 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1408 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1409 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1410 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1411 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1412 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1413 break;
1414 default:
1415 DEBUGOUT(" Error on flow type input\n");
1416 return IXGBE_ERR_CONFIG;
1417 }
1418
1419 /* configure FDIRCMD register */
1420 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1421 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1422 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1423 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1424
1425 /*
1426 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1427 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1428 */
1429 fdirhashcmd = (u64)fdircmd << 32;
1430 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1431 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1432
1433 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1434
1435 return IXGBE_SUCCESS;
1436 }
1437
#ifdef lint
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n)
#else
/*
 * Fold bucket-key bit _n (against lo_hash_dword) and bit _n+16 (against
 * hi_hash_dword) into bucket_hash.  The do { } while (0) wrapper
 * intentionally has NO trailing semicolon: call sites supply their own,
 * which keeps the macro a single statement and safe inside unbraced
 * if/else bodies.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
#endif
1450 /**
1451 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1452 * @atr_input: input bitstream to compute the hash on
1453 * @input_mask: mask for the input bitstream
1454 *
 * This function serves two main purposes. First it applies the input_mask
1456 * to the atr_input resulting in a cleaned up atr_input data stream.
1457 * Secondly it computes the hash and stores it in the bkt_hash field at
1458 * the end of the input byte stream. This way it will be available for
1459 * future use without needing to recompute the hash.
1460 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{
	/*
	 * NOTE: IXGBE_COMPUTE_BKT_HASH_ITERATION reads and writes these
	 * locals by name; do not rename them.
	 */
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;

	/* Apply masks to input data (mutates the caller's input in place) */
	input->dword_stream[0] &= input_mask->dword_stream[0];
	input->dword_stream[1] &= input_mask->dword_stream[1];
	input->dword_stream[2] &= input_mask->dword_stream[2];
	input->dword_stream[3] &= input_mask->dword_stream[3];
	input->dword_stream[4] &= input_mask->dword_stream[4];
	input->dword_stream[5] &= input_mask->dword_stream[5];
	input->dword_stream[6] &= input_mask->dword_stream[6];
	input->dword_stream[7] &= input_mask->dword_stream[7];
	input->dword_stream[8] &= input_mask->dword_stream[8];
	input->dword_stream[9] &= input_mask->dword_stream[9];
	input->dword_stream[10] &= input_mask->dword_stream[10];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword by XOR-folding the whole stream */
	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
				    input->dword_stream[2] ^
				    input->dword_stream[3] ^
				    input->dword_stream[4] ^
				    input->dword_stream[5] ^
				    input->dword_stream[6] ^
				    input->dword_stream[7] ^
				    input->dword_stream[8] ^
				    input->dword_stream[9] ^
				    input->dword_stream[10]);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/*
	 * Process remaining 30 bits of the key.  Unrolled deliberately so
	 * each (0x01 << n) key test is a compile-time constant.
	 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
1535
1536 /**
1537 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1538 * @input_mask: mask to be bit swapped
1539 *
1540 * The source and destination port masks for flow director are bit swapped
1541 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1542 * generate a correctly swapped value we need to bit swap the mask and that
1543 * is what is accomplished by this function.
1544 **/
ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input * input_mask)1545 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1546 {
1547 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1548 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1549 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1550 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1551 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1552 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1553 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1554 }
1555
1556 /*
1557 * These two macros are meant to address the fact that we have registers
1558 * that are either all or in part big-endian. As a result on big-endian
1559 * systems we will end up byte swapping the value to little-endian before
1560 * it is byte swapped again and written to the hardware in the original
1561 * big-endian format.
1562 */
/* Reverse the byte order of a 32-bit value */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a big-endian register: byte-swap after the IXGBE_NTOHL conversion */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Swap the bytes of a 16-bit value, then apply IXGBE_NTOHS */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1572
ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw * hw,union ixgbe_atr_input * input_mask)1573 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1574 union ixgbe_atr_input *input_mask)
1575 {
1576 /* mask IPv6 since it is currently not supported */
1577 u32 fdirm = IXGBE_FDIRM_DIPv6;
1578 u32 fdirtcpm;
1579
1580 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1581
1582 /*
1583 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1584 * are zero, then assume a full mask for that field. Also assume that
1585 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1586 * cannot be masked out in this implementation.
1587 *
1588 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1589 * point in time.
1590 */
1591
1592 /* verify bucket hash is cleared on hash generation */
1593 if (input_mask->formatted.bkt_hash)
1594 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1595
1596 /* Program FDIRM and verify partial masks */
1597 switch (input_mask->formatted.vm_pool & 0x7F) {
1598 case 0x0:
1599 fdirm |= IXGBE_FDIRM_POOL;
1600 /* FALLTHRU */
1601 case 0x7F:
1602 break;
1603 default:
1604 DEBUGOUT(" Error on vm pool mask\n");
1605 return IXGBE_ERR_CONFIG;
1606 }
1607
1608 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1609 case 0x0:
1610 fdirm |= IXGBE_FDIRM_L4P;
1611 if (input_mask->formatted.dst_port ||
1612 input_mask->formatted.src_port) {
1613 DEBUGOUT(" Error on src/dst port mask\n");
1614 return IXGBE_ERR_CONFIG;
1615 }
1616 /* FALLTHRU */
1617 case IXGBE_ATR_L4TYPE_MASK:
1618 break;
1619 default:
1620 DEBUGOUT(" Error on flow type mask\n");
1621 return IXGBE_ERR_CONFIG;
1622 }
1623
1624 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1625 case 0x0000:
1626 /* mask VLAN ID, fall through to mask VLAN priority */
1627 fdirm |= IXGBE_FDIRM_VLANID;
1628 /* FALLTHRU */
1629 case 0x0FFF:
1630 /* mask VLAN priority */
1631 fdirm |= IXGBE_FDIRM_VLANP;
1632 break;
1633 case 0xE000:
1634 /* mask VLAN ID only, fall through */
1635 fdirm |= IXGBE_FDIRM_VLANID;
1636 /* FALLTHRU */
1637 case 0xEFFF:
1638 /* no VLAN fields masked */
1639 break;
1640 default:
1641 DEBUGOUT(" Error on VLAN mask\n");
1642 return IXGBE_ERR_CONFIG;
1643 }
1644
1645 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1646 case 0x0000:
1647 /* Mask Flex Bytes, fall through */
1648 fdirm |= IXGBE_FDIRM_FLEX;
1649 /* FALLTHRU */
1650 case 0xFFFF:
1651 break;
1652 default:
1653 DEBUGOUT(" Error on flexible byte mask\n");
1654 return IXGBE_ERR_CONFIG;
1655 }
1656
1657 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1658 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1659
1660 /* store the TCP/UDP port masks, bit reversed from port layout */
1661 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1662
1663 /* write both the same so that UDP and TCP use the same mask */
1664 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1665 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1666
1667 /* store source and destination IP masks (big-enian) */
1668 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1669 ~input_mask->formatted.src_ip[0]);
1670 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1671 ~input_mask->formatted.dst_ip[0]);
1672
1673 return IXGBE_SUCCESS;
1674 }
1675
/**
 * ixgbe_fdir_write_perfect_filter_82599 - Program a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter fields to program (already masked/hashed by the caller)
 * @soft_id: software index stored alongside the filter
 * @queue: Rx queue index to direct matching traffic to
 *
 * Writes the filter fields into the FDIR filter registers and then issues
 * the add-flow command.  Register order matters: FDIRCMD must be written
 * last, after a flush, so the hardware latches a consistent filter.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register: bucket hash plus software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	/* the drop queue gets the explicit drop action instead of routing */
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}
1734
ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_input * input,u16 soft_id)1735 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1736 union ixgbe_atr_input *input,
1737 u16 soft_id)
1738 {
1739 u32 fdirhash;
1740 u32 fdircmd = 0;
1741 u32 retry_count;
1742 s32 err = IXGBE_SUCCESS;
1743
1744 /* configure FDIRHASH register */
1745 fdirhash = input->formatted.bkt_hash;
1746 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1747 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1748
1749 /* flush hash to HW */
1750 IXGBE_WRITE_FLUSH(hw);
1751
1752 /* Query if filter is present */
1753 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1754
1755 for (retry_count = 10; retry_count; retry_count--) {
1756 /* allow 10us for query to process */
1757 usec_delay(10);
1758 /* verify query completed successfully */
1759 fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1760 if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1761 break;
1762 }
1763
1764 if (!retry_count)
1765 err = IXGBE_ERR_FDIR_REINIT_FAILED;
1766
1767 /* if filter exists in hardware then remove it */
1768 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1769 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1770 IXGBE_WRITE_FLUSH(hw);
1771 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1772 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1773 }
1774
1775 return err;
1776 }
1777
1778 /**
1779 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1780 * @hw: pointer to hardware structure
1781 * @input: input bitstream
1782 * @input_mask: mask for the input bitstream
1783 * @soft_id: software index for the filters
1784 * @queue: queue index to direct traffic to
1785 *
1786 * Note that the caller to this function must lock before calling, since the
1787 * hardware writes must be protected from one another.
1788 **/
ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_input * input,union ixgbe_atr_input * input_mask,u16 soft_id,u8 queue)1789 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1790 union ixgbe_atr_input *input,
1791 union ixgbe_atr_input *input_mask,
1792 u16 soft_id, u8 queue)
1793 {
1794 s32 err = IXGBE_ERR_CONFIG;
1795
1796 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1797
1798 /*
1799 * Check flow_type formatting, and bail out before we touch the hardware
1800 * if there's a configuration issue
1801 */
1802 switch (input->formatted.flow_type) {
1803 case IXGBE_ATR_FLOW_TYPE_IPV4:
1804 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1805 if (input->formatted.dst_port || input->formatted.src_port) {
1806 DEBUGOUT(" Error on src/dst port\n");
1807 return IXGBE_ERR_CONFIG;
1808 }
1809 break;
1810 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1811 if (input->formatted.dst_port || input->formatted.src_port) {
1812 DEBUGOUT(" Error on src/dst port\n");
1813 return IXGBE_ERR_CONFIG;
1814 }
1815 /* FALLTHRU */
1816 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1817 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1818 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1819 IXGBE_ATR_L4TYPE_MASK;
1820 break;
1821 default:
1822 DEBUGOUT(" Error on flow type input\n");
1823 return err;
1824 }
1825
1826 /* program input mask into the HW */
1827 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1828 if (err)
1829 return err;
1830
1831 /* apply mask and compute/store hash */
1832 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1833
1834 /* program filters to filter memory */
1835 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
1836 soft_id, queue);
1837 }
1838
1839 /**
1840 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1841 * @hw: pointer to hardware structure
1842 * @reg: analog register to read
1843 * @val: read value
1844 *
1845 * Performs read operation to Omer analog register specified.
1846 **/
ixgbe_read_analog_reg8_82599(struct ixgbe_hw * hw,u32 reg,u8 * val)1847 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1848 {
1849 u32 core_ctl;
1850
1851 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1852
1853 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1854 (reg << 8));
1855 IXGBE_WRITE_FLUSH(hw);
1856 usec_delay(10);
1857 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1858 *val = (u8)core_ctl;
1859
1860 return IXGBE_SUCCESS;
1861 }
1862
1863 /**
1864 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
1865 * @hw: pointer to hardware structure
1866 * @reg: atlas register to write
1867 * @val: value to write
1868 *
1869 * Performs write operation to Omer analog register specified.
1870 **/
ixgbe_write_analog_reg8_82599(struct ixgbe_hw * hw,u32 reg,u8 val)1871 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1872 {
1873 u32 core_ctl;
1874
1875 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
1876
1877 core_ctl = (reg << 8) | val;
1878 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1879 IXGBE_WRITE_FLUSH(hw);
1880 usec_delay(10);
1881
1882 return IXGBE_SUCCESS;
1883 }
1884
1885 /**
1886 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1887 * @hw: pointer to hardware structure
1888 *
1889 * Starts the hardware using the generic start_hw function
1890 * and the generation start_hw function.
1891 * Then performs revision-specific operations, if any.
1892 **/
ixgbe_start_hw_82599(struct ixgbe_hw * hw)1893 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1894 {
1895 s32 ret_val = IXGBE_SUCCESS;
1896
1897 DEBUGFUNC("ixgbe_start_hw_82599");
1898
1899 ret_val = ixgbe_start_hw_generic(hw);
1900 if (ret_val != IXGBE_SUCCESS)
1901 goto out;
1902
1903 ret_val = ixgbe_start_hw_gen2(hw);
1904 if (ret_val != IXGBE_SUCCESS)
1905 goto out;
1906
1907 /* We need to run link autotry after the driver loads */
1908 hw->mac.autotry_restart = TRUE;
1909
1910 if (ret_val == IXGBE_SUCCESS)
1911 ret_val = ixgbe_verify_fw_version_82599(hw);
1912 out:
1913 return ret_val;
1914 }
1915
1916 /**
1917 * ixgbe_identify_phy_82599 - Get physical layer module
1918 * @hw: pointer to hardware structure
1919 *
1920 * Determines the physical layer module found on the current adapter.
1921 * If PHY already detected, maintains current PHY type in hw struct,
1922 * otherwise executes the PHY detection routine.
1923 **/
ixgbe_identify_phy_82599(struct ixgbe_hw * hw)1924 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1925 {
1926 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1927
1928 DEBUGFUNC("ixgbe_identify_phy_82599");
1929
1930 /* Detect PHY if not unknown - returns success if already detected. */
1931 status = ixgbe_identify_phy_generic(hw);
1932 if (status != IXGBE_SUCCESS) {
1933 /* 82599 10GBASE-T requires an external PHY */
1934 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1935 goto out;
1936 else
1937 status = ixgbe_identify_module_generic(hw);
1938 }
1939
1940 /* Set PHY type none if no PHY detected */
1941 if (hw->phy.type == ixgbe_phy_unknown) {
1942 hw->phy.type = ixgbe_phy_none;
1943 status = IXGBE_SUCCESS;
1944 }
1945
1946 /* Return error if SFP module has been detected but is not supported */
1947 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1948 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1949
1950 out:
1951 return status;
1952 }
1953
1954 /**
1955 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
1956 * @hw: pointer to hardware structure
1957 *
1958 * Determines physical layer capabilities of the current configuration.
1959 **/
ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw * hw)1960 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1961 {
1962 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1963 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1964 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1965 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1966 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1967 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1968 u16 ext_ability = 0;
1969 u8 comp_codes_10g = 0;
1970 u8 comp_codes_1g = 0;
1971
1972 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
1973
1974 hw->phy.ops.identify(hw);
1975
1976 switch (hw->phy.type) {
1977 case ixgbe_phy_tn:
1978 case ixgbe_phy_cu_unknown:
1979 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1980 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1981 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1982 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1983 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1984 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1985 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1986 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1987 goto out;
1988 default:
1989 break;
1990 }
1991
1992 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1993 case IXGBE_AUTOC_LMS_1G_AN:
1994 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1995 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
1996 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
1997 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1998 goto out;
1999 }
2000 /* SFI mode so read SFP module */
2001 goto sfp_check;
2002 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2003 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2004 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2005 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2006 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2007 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2008 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2009 goto out;
2010 case IXGBE_AUTOC_LMS_10G_SERIAL:
2011 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2012 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2013 goto out;
2014 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2015 goto sfp_check;
2016 break;
2017 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2018 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2019 if (autoc & IXGBE_AUTOC_KX_SUPP)
2020 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2021 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2022 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2023 if (autoc & IXGBE_AUTOC_KR_SUPP)
2024 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2025 goto out;
2026 default:
2027 goto out;
2028 }
2029
2030 sfp_check:
2031 /* SFP check must be done last since DA modules are sometimes used to
2032 * test KR mode - we need to id KR mode correctly before SFP module.
2033 * Call identify_sfp because the pluggable module may have changed */
2034 hw->phy.ops.identify_sfp(hw);
2035 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2036 goto out;
2037
2038 switch (hw->phy.type) {
2039 case ixgbe_phy_sfp_passive_tyco:
2040 case ixgbe_phy_sfp_passive_unknown:
2041 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2042 break;
2043 case ixgbe_phy_sfp_ftl_active:
2044 case ixgbe_phy_sfp_active_unknown:
2045 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2046 break;
2047 case ixgbe_phy_sfp_avago:
2048 case ixgbe_phy_sfp_ftl:
2049 case ixgbe_phy_sfp_intel:
2050 case ixgbe_phy_sfp_unknown:
2051 hw->phy.ops.read_i2c_eeprom(hw,
2052 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2053 hw->phy.ops.read_i2c_eeprom(hw,
2054 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2055 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2056 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2057 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2058 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2059 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2060 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2061 else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2062 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2063 break;
2064 default:
2065 break;
2066 }
2067
2068 out:
2069 return physical_layer;
2070 }
2071
2072 /**
2073 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2074 * @hw: pointer to hardware structure
2075 * @regval: register value to write to RXCTRL
2076 *
2077 * Enables the Rx DMA unit for 82599
2078 **/
ixgbe_enable_rx_dma_82599(struct ixgbe_hw * hw,u32 regval)2079 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2080 {
2081
2082 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2083
2084 /*
2085 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2086 * If traffic is incoming before we enable the Rx unit, it could hang
2087 * the Rx DMA unit. Therefore, make sure the security engine is
2088 * completely disabled prior to enabling the Rx unit.
2089 */
2090
2091 hw->mac.ops.disable_sec_rx_path(hw);
2092
2093 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2094
2095 hw->mac.ops.enable_sec_rx_path(hw);
2096
2097 return IXGBE_SUCCESS;
2098 }
2099
2100 /**
2101 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2102 * @hw: pointer to hardware structure
2103 *
2104 * Verifies that installed the firmware version is 0.6 or higher
2105 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2106 *
2107 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2108 * if the FW version is not supported.
2109 **/
ixgbe_verify_fw_version_82599(struct ixgbe_hw * hw)2110 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2111 {
2112 s32 status = IXGBE_ERR_EEPROM_VERSION;
2113 u16 fw_offset, fw_ptp_cfg_offset;
2114 u16 fw_version = 0;
2115
2116 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2117
2118 /* firmware check is only necessary for SFI devices */
2119 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2120 status = IXGBE_SUCCESS;
2121 goto fw_version_out;
2122 }
2123
2124 /* get the offset to the Firmware Module block */
2125 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2126
2127 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2128 goto fw_version_out;
2129
2130 /* get the offset to the Pass Through Patch Configuration block */
2131 hw->eeprom.ops.read(hw, (fw_offset +
2132 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2133 &fw_ptp_cfg_offset);
2134
2135 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2136 goto fw_version_out;
2137
2138 /* get the firmware version */
2139 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2140 IXGBE_FW_PATCH_VERSION_4), &fw_version);
2141
2142 if (fw_version > 0x5)
2143 status = IXGBE_SUCCESS;
2144
2145 fw_version_out:
2146 return status;
2147 }
2148
2149 /**
2150 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2151 * @hw: pointer to hardware structure
2152 *
2153 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2154 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2155 **/
ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw * hw)2156 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2157 {
2158 bool lesm_enabled = FALSE;
2159 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2160 s32 status;
2161
2162 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2163
2164 /* get the offset to the Firmware Module block */
2165 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2166
2167 if ((status != IXGBE_SUCCESS) ||
2168 (fw_offset == 0) || (fw_offset == 0xFFFF))
2169 goto out;
2170
2171 /* get the offset to the LESM Parameters block */
2172 status = hw->eeprom.ops.read(hw, (fw_offset +
2173 IXGBE_FW_LESM_PARAMETERS_PTR),
2174 &fw_lesm_param_offset);
2175
2176 if ((status != IXGBE_SUCCESS) ||
2177 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2178 goto out;
2179
2180 /* get the lesm state word */
2181 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2182 IXGBE_FW_LESM_STATE_1),
2183 &fw_lesm_state);
2184
2185 if ((status == IXGBE_SUCCESS) &&
2186 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2187 lesm_enabled = TRUE;
2188
2189 out:
2190 return lesm_enabled;
2191 }
2192
2193 /**
2194 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2195 * fastest available method
2196 *
2197 * @hw: pointer to hardware structure
2198 * @offset: offset of word in EEPROM to read
2199 * @words: number of words
2200 * @data: word(s) read from the EEPROM
2201 *
2202 * Retrieves 16 bit word(s) read from EEPROM
2203 **/
ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)2204 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2205 u16 words, u16 *data)
2206 {
2207 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2208 s32 ret_val = IXGBE_ERR_CONFIG;
2209
2210 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2211
2212 /*
2213 * If EEPROM is detected and can be addressed using 14 bits,
2214 * use EERD otherwise use bit bang
2215 */
2216 if ((eeprom->type == ixgbe_eeprom_spi) &&
2217 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2218 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2219 data);
2220 else
2221 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2222 words,
2223 data);
2224
2225 return ret_val;
2226 }
2227
2228 /**
2229 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2230 * fastest available method
2231 *
2232 * @hw: pointer to hardware structure
2233 * @offset: offset of word in the EEPROM to read
2234 * @data: word read from the EEPROM
2235 *
2236 * Reads a 16 bit word from the EEPROM
2237 **/
ixgbe_read_eeprom_82599(struct ixgbe_hw * hw,u16 offset,u16 * data)2238 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2239 u16 offset, u16 *data)
2240 {
2241 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2242 s32 ret_val = IXGBE_ERR_CONFIG;
2243
2244 DEBUGFUNC("ixgbe_read_eeprom_82599");
2245
2246 /*
2247 * If EEPROM is detected and can be addressed using 14 bits,
2248 * use EERD otherwise use bit bang
2249 */
2250 if ((eeprom->type == ixgbe_eeprom_spi) &&
2251 (offset <= IXGBE_EERD_MAX_ADDR))
2252 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2253 else
2254 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2255
2256 return ret_val;
2257 }
2258
2259
2260