xref: /freebsd/sys/dev/ixgbe/ixgbe_82599.c (revision b7d683e608ac894e75b2de7eac5eaa083650de54)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2012, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 
41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
42 					 ixgbe_link_speed speed,
43 					 bool autoneg,
44 					 bool autoneg_wait_to_complete);
45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
47 				   u16 offset, u16 *data);
48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 					  u16 words, u16 *data);
50 
51 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
52 {
53 	struct ixgbe_mac_info *mac = &hw->mac;
54 
55 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
56 
57 	/* enable the laser control functions for SFP+ fiber */
58 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
59 		mac->ops.disable_tx_laser =
60 				       &ixgbe_disable_tx_laser_multispeed_fiber;
61 		mac->ops.enable_tx_laser =
62 					&ixgbe_enable_tx_laser_multispeed_fiber;
63 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
64 
65 	} else {
66 		mac->ops.disable_tx_laser = NULL;
67 		mac->ops.enable_tx_laser = NULL;
68 		mac->ops.flap_tx_laser = NULL;
69 	}
70 
71 	if (hw->phy.multispeed_fiber) {
72 		/* Set up dual speed SFP+ support */
73 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
74 	} else {
75 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
76 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
77 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
78 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
79 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
80 		} else {
81 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
82 		}
83 	}
84 }
85 
86 /**
87  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
88  *  @hw: pointer to hardware structure
89  *
90  *  Initialize any function pointers that were not able to be
91  *  set during init_shared_code because the PHY/SFP type was
92  *  not known.  Perform the SFP init if necessary.
93  *
94  **/
95 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
96 {
97 	struct ixgbe_mac_info *mac = &hw->mac;
98 	struct ixgbe_phy_info *phy = &hw->phy;
99 	s32 ret_val = IXGBE_SUCCESS;
100 
101 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
102 
103 	/* Identify the PHY or SFP module */
104 	ret_val = phy->ops.identify(hw);
105 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
106 		goto init_phy_ops_out;
107 
108 	/* Setup function pointers based on detected SFP module and speeds */
109 	ixgbe_init_mac_link_ops_82599(hw);
110 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
111 		hw->phy.ops.reset = NULL;
112 
113 	/* If copper media, overwrite with copper function pointers */
114 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
115 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
116 		mac->ops.get_link_capabilities =
117 				  &ixgbe_get_copper_link_capabilities_generic;
118 	}
119 
120 	/* Set necessary function pointers based on phy type */
121 	switch (hw->phy.type) {
122 	case ixgbe_phy_tn:
123 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
124 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
125 		phy->ops.get_firmware_version =
126 			     &ixgbe_get_phy_firmware_version_tnx;
127 		break;
128 	default:
129 		break;
130 	}
131 init_phy_ops_out:
132 	return ret_val;
133 }
134 
/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Plays the EEPROM-provided PHY init sequence for the detected SFP
 *  module into the CORECTL register, then restarts the DSP/autoneg
 *  so the new analog settings take effect.  No-op when the SFP type
 *  is still unknown.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg_anlp1 = 0;
	u32 i = 0;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		/* Locate this module's init words in the EEPROM */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Stream EEPROM words into CORECTL until 0xffff terminator */
		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Now restart DSP by setting Restart_AN and clearing LMS */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
				IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
				IXGBE_AUTOC_AN_RESTART));

		/* Wait for AN to leave state 0 (poll up to 10 x 4ms) */
		for (i = 0; i < 10; i++) {
			msec_delay(4);
			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
				break;
		}
		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

		/* Restart DSP by setting Restart_AN and return to SFI mode */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
				IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
				IXGBE_AUTOC_AN_RESTART));
	}

setup_sfp_out:
	return ret_val;
}
201 
202 /**
203  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
204  *  @hw: pointer to hardware structure
205  *
206  *  Initialize the function pointers and assign the MAC type for 82599.
207  *  Does not touch the hardware.
208  **/
209 
/**
 *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82599.
 *  Does not touch the hardware except to read FWSM for the ARC
 *  subsystem flag and the PCIe config space for the MSI-X count.
 **/
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* NOTE(review): the result of ixgbe_init_phy_ops_generic() is
	 * overwritten by the next call; only the generic-ops result is
	 * returned.  Confirm this is intentional upstream behavior. */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 silicon limits */
	mac->mcft_size		= 128;
	mac->vft_size		= 128;
	mac->num_rar_entries	= 128;
	mac->rx_pb_size		= 512;
	mac->max_tx_queues	= 128;
	mac->max_rx_queues	= 128;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* Manageability ARC subsystem present iff FWSM mode bits are set */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = &ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;


	return ret_val;
}
287 
288 /**
289  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
290  *  @hw: pointer to hardware structure
291  *  @speed: pointer to link speed
292  *  @negotiation: TRUE when autoneg or autotry is enabled
293  *
294  *  Determines the link capabilities by reading the AUTOC register.
295  **/
296 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
297 				      ixgbe_link_speed *speed,
298 				      bool *negotiation)
299 {
300 	s32 status = IXGBE_SUCCESS;
301 	u32 autoc = 0;
302 
303 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
304 
305 
306 	/* Check if 1G SFP module. */
307 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
308 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
309 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
310 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
311 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
312 		*negotiation = TRUE;
313 		goto out;
314 	}
315 
316 	/*
317 	 * Determine link capabilities based on the stored value of AUTOC,
318 	 * which represents EEPROM defaults.  If AUTOC value has not
319 	 * been stored, use the current register values.
320 	 */
321 	if (hw->mac.orig_link_settings_stored)
322 		autoc = hw->mac.orig_autoc;
323 	else
324 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
325 
326 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
327 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
328 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
329 		*negotiation = FALSE;
330 		break;
331 
332 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
333 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
334 		*negotiation = FALSE;
335 		break;
336 
337 	case IXGBE_AUTOC_LMS_1G_AN:
338 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
339 		*negotiation = TRUE;
340 		break;
341 
342 	case IXGBE_AUTOC_LMS_10G_SERIAL:
343 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
344 		*negotiation = FALSE;
345 		break;
346 
347 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
348 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
349 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
350 		if (autoc & IXGBE_AUTOC_KR_SUPP)
351 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
352 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
353 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
354 		if (autoc & IXGBE_AUTOC_KX_SUPP)
355 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
356 		*negotiation = TRUE;
357 		break;
358 
359 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
360 		*speed = IXGBE_LINK_SPEED_100_FULL;
361 		if (autoc & IXGBE_AUTOC_KR_SUPP)
362 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
363 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
364 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
365 		if (autoc & IXGBE_AUTOC_KX_SUPP)
366 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
367 		*negotiation = TRUE;
368 		break;
369 
370 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
371 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
372 		*negotiation = FALSE;
373 		break;
374 
375 	default:
376 		status = IXGBE_ERR_LINK_SETUP;
377 		goto out;
378 		break;
379 	}
380 
381 	if (hw->phy.multispeed_fiber) {
382 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
383 			  IXGBE_LINK_SPEED_1GB_FULL;
384 		*negotiation = TRUE;
385 	}
386 
387 out:
388 	return status;
389 }
390 
391 /**
392  *  ixgbe_get_media_type_82599 - Get media type
393  *  @hw: pointer to hardware structure
394  *
395  *  Returns the media type (fiber, copper, backplane)
396  **/
397 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
398 {
399 	enum ixgbe_media_type media_type;
400 
401 	DEBUGFUNC("ixgbe_get_media_type_82599");
402 
403 	/* Detect if there is a copper PHY attached. */
404 	switch (hw->phy.type) {
405 	case ixgbe_phy_cu_unknown:
406 	case ixgbe_phy_tn:
407 		media_type = ixgbe_media_type_copper;
408 		goto out;
409 	default:
410 		break;
411 	}
412 
413 	switch (hw->device_id) {
414 	case IXGBE_DEV_ID_82599_KX4:
415 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
416 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
417 	case IXGBE_DEV_ID_82599_KR:
418 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
419 	case IXGBE_DEV_ID_82599_XAUI_LOM:
420 		/* Default device ID is mezzanine card KX/KX4 */
421 		media_type = ixgbe_media_type_backplane;
422 		break;
423 	case IXGBE_DEV_ID_82599_SFP:
424 	case IXGBE_DEV_ID_82599_SFP_FCOE:
425 	case IXGBE_DEV_ID_82599_SFP_EM:
426 	case IXGBE_DEV_ID_82599_SFP_SF2:
427 	case IXGBE_DEV_ID_82599EN_SFP:
428 		media_type = ixgbe_media_type_fiber;
429 		break;
430 	case IXGBE_DEV_ID_82599_CX4:
431 		media_type = ixgbe_media_type_cx4;
432 		break;
433 	case IXGBE_DEV_ID_82599_T3_LOM:
434 		media_type = ixgbe_media_type_copper;
435 		break;
436 	default:
437 		media_type = ixgbe_media_type_unknown;
438 		break;
439 	}
440 out:
441 	return media_type;
442 }
443 
444 /**
445  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
446  *  @hw: pointer to hardware structure
447  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
448  *
449  *  Configures link settings based on values in the ixgbe_hw struct.
450  *  Restarts the link.  Performs autonegotiation if needed.
451  **/
452 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
453 			       bool autoneg_wait_to_complete)
454 {
455 	u32 autoc_reg;
456 	u32 links_reg;
457 	u32 i;
458 	s32 status = IXGBE_SUCCESS;
459 
460 	DEBUGFUNC("ixgbe_start_mac_link_82599");
461 
462 
463 	/* Restart link */
464 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
465 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
466 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
467 
468 	/* Only poll for autoneg to complete if specified to do so */
469 	if (autoneg_wait_to_complete) {
470 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
471 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
472 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
473 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
474 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
475 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
476 			links_reg = 0; /* Just in case Autoneg time = 0 */
477 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
478 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
479 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
480 					break;
481 				msec_delay(100);
482 			}
483 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
484 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
485 				DEBUGOUT("Autoneg did not complete.\n");
486 			}
487 		}
488 	}
489 
490 	/* Add delay to filter out noises during initial link setup */
491 	msec_delay(50);
492 
493 	return status;
494 }
495 
496 /**
497  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
498  *  @hw: pointer to hardware structure
499  *
500  *  The base drivers may require better control over SFP+ module
501  *  PHY states.  This includes selectively shutting down the Tx
502  *  laser on the PHY, effectively halting physical link.
503  **/
504 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
505 {
506 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
507 
508 	/* Disable tx laser; allow 100us to go dark per spec */
509 	esdp_reg |= IXGBE_ESDP_SDP3;
510 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
511 	IXGBE_WRITE_FLUSH(hw);
512 	usec_delay(100);
513 }
514 
515 /**
516  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
517  *  @hw: pointer to hardware structure
518  *
519  *  The base drivers may require better control over SFP+ module
520  *  PHY states.  This includes selectively turning on the Tx
521  *  laser on the PHY, effectively starting physical link.
522  **/
523 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
524 {
525 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
526 
527 	/* Enable tx laser; allow 100ms to light up */
528 	esdp_reg &= ~IXGBE_ESDP_SDP3;
529 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
530 	IXGBE_WRITE_FLUSH(hw);
531 	msec_delay(100);
532 }
533 
534 /**
535  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
536  *  @hw: pointer to hardware structure
537  *
538  *  When the driver changes the link speeds that it can support,
539  *  it sets autotry_restart to TRUE to indicate that we need to
540  *  initiate a new autotry session with the link partner.  To do
541  *  so, we set the speed then disable and re-enable the tx laser, to
542  *  alert the link partner that it also needs to restart autotry on its
543  *  end.  This is consistent with TRUE clause 37 autoneg, which also
544  *  involves a loss of signal.
545  **/
546 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
547 {
548 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
549 
550 	if (hw->mac.autotry_restart) {
551 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
552 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
553 		hw->mac.autotry_restart = FALSE;
554 	}
555 }
556 
557 /**
558  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
559  *  @hw: pointer to hardware structure
560  *  @speed: new link speed
561  *  @autoneg: TRUE if autonegotiation enabled
562  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
563  *
564  *  Set the link speed in the AUTOC register and restarts link.
565  **/
/**
 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 *  Tries each requested speed in software, highest first (10G then 1G),
 *  driving the SFP+ module rate-select pin (SDP5) and flapping the tx
 *  laser between attempts.  On total failure it recurses once with only
 *  the highest speed that was tried.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = FALSE;
	bool negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed: drive SDP5 high for 10G */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed: drive SDP5 low for 1G */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
			highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
707 
708 /**
709  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
710  *  @hw: pointer to hardware structure
711  *  @speed: new link speed
712  *  @autoneg: TRUE if autonegotiation enabled
713  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
714  *
715  *  Implements the Intel SmartSpeed algorithm.
716  **/
/**
 *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Implements the Intel SmartSpeed algorithm: first try link with the
 *  full advertisement; if that fails and both KR and KX4/KX were
 *  advertised, retry with KR disabled (smart_speed_active), and finally
 *  fall back to the full advertisement again.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed, bool autoneg,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	 /* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
820 
821 /**
822  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
823  *  @hw: pointer to hardware structure
824  *  @speed: new link speed
825  *  @autoneg: TRUE if autonegotiation enabled
826  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
827  *
828  *  Set the link speed in the AUTOC register and restarts link.
829  **/
830 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
831 			       ixgbe_link_speed speed, bool autoneg,
832 			       bool autoneg_wait_to_complete)
833 {
834 	s32 status = IXGBE_SUCCESS;
835 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
836 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
837 	u32 start_autoc = autoc;
838 	u32 orig_autoc = 0;
839 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
840 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
841 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
842 	u32 links_reg;
843 	u32 i;
844 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
845 
846 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
847 
848 	/* Check to see if speed passed in is supported. */
849 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
850 	if (status != IXGBE_SUCCESS)
851 		goto out;
852 
853 	speed &= link_capabilities;
854 
855 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
856 		status = IXGBE_ERR_LINK_SETUP;
857 		goto out;
858 	}
859 
860 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
861 	if (hw->mac.orig_link_settings_stored)
862 		orig_autoc = hw->mac.orig_autoc;
863 	else
864 		orig_autoc = autoc;
865 
866 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
867 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
868 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
869 		/* Set KX4/KX/KR support according to speed requested */
870 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
871 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
872 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
873 				autoc |= IXGBE_AUTOC_KX4_SUPP;
874 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
875 			    (hw->phy.smart_speed_active == FALSE))
876 				autoc |= IXGBE_AUTOC_KR_SUPP;
877 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
878 			autoc |= IXGBE_AUTOC_KX_SUPP;
879 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
880 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
881 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
882 		/* Switch from 1G SFI to 10G SFI if requested */
883 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
884 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
885 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
886 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
887 		}
888 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
889 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
890 		/* Switch from 10G SFI to 1G SFI if requested */
891 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
892 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
893 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
894 			if (autoneg)
895 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
896 			else
897 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
898 		}
899 	}
900 
901 	if (autoc != start_autoc) {
902 		/* Restart link */
903 		autoc |= IXGBE_AUTOC_AN_RESTART;
904 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
905 
906 		/* Only poll for autoneg to complete if specified to do so */
907 		if (autoneg_wait_to_complete) {
908 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
909 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
910 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
911 				links_reg = 0; /*Just in case Autoneg time=0*/
912 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
913 					links_reg =
914 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
915 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
916 						break;
917 					msec_delay(100);
918 				}
919 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
920 					status =
921 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
922 					DEBUGOUT("Autoneg did not complete.\n");
923 				}
924 			}
925 		}
926 
927 		/* Add delay to filter out noises during initial link setup */
928 		msec_delay(50);
929 	}
930 
931 out:
932 	return status;
933 }
934 
935 /**
936  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
937  *  @hw: pointer to hardware structure
938  *  @speed: new link speed
939  *  @autoneg: TRUE if autonegotiation enabled
940  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
941  *
942  *  Restarts link on PHY and MAC based on settings passed in.
943  **/
944 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
945 					 ixgbe_link_speed speed,
946 					 bool autoneg,
947 					 bool autoneg_wait_to_complete)
948 {
949 	s32 status;
950 
951 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
952 
953 	/* Setup the PHY according to input speed */
954 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
955 					      autoneg_wait_to_complete);
956 	/* Set up MAC */
957 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
958 
959 	return status;
960 }
961 
962 /**
963  *  ixgbe_reset_hw_82599 - Perform hardware reset
964  *  @hw: pointer to hardware structure
965  *
966  *  Resets the hardware by resetting the transmit and receive units, masks
967  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
968  *  reset.
969  **/
970 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
971 {
972 	ixgbe_link_speed link_speed;
973 	s32 status;
974 	u32 ctrl, i, autoc, autoc2;
975 	bool link_up = FALSE;
976 
977 	DEBUGFUNC("ixgbe_reset_hw_82599");
978 
979 	/* Call adapter stop to disable tx/rx and clear interrupts */
980 	status = hw->mac.ops.stop_adapter(hw);
981 	if (status != IXGBE_SUCCESS)
982 		goto reset_hw_out;
983 
984 	/* flush pending Tx transactions */
985 	ixgbe_clear_tx_pending(hw);
986 
987 	/* PHY ops must be identified and initialized prior to reset */
988 
989 	/* Identify PHY and related function pointers */
990 	status = hw->phy.ops.init(hw);
991 
992 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
993 		goto reset_hw_out;
994 
995 	/* Setup SFP module if there is one present. */
996 	if (hw->phy.sfp_setup_needed) {
997 		status = hw->mac.ops.setup_sfp(hw);
998 		hw->phy.sfp_setup_needed = FALSE;
999 	}
1000 
1001 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1002 		goto reset_hw_out;
1003 
1004 	/* Reset PHY */
1005 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1006 		hw->phy.ops.reset(hw);
1007 
1008 mac_reset_top:
1009 	/*
1010 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
1011 	 * If link reset is used when link is up, it might reset the PHY when
1012 	 * mng is using it.  If link is down or the flag to force full link
1013 	 * reset is set, then perform link reset.
1014 	 */
1015 	ctrl = IXGBE_CTRL_LNK_RST;
1016 	if (!hw->force_full_reset) {
1017 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1018 		if (link_up)
1019 			ctrl = IXGBE_CTRL_RST;
1020 	}
1021 
1022 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1023 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1024 	IXGBE_WRITE_FLUSH(hw);
1025 
1026 	/* Poll for reset bit to self-clear indicating reset is complete */
1027 	for (i = 0; i < 10; i++) {
1028 		usec_delay(1);
1029 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1030 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
1031 			break;
1032 	}
1033 
1034 	if (ctrl & IXGBE_CTRL_RST_MASK) {
1035 		status = IXGBE_ERR_RESET_FAILED;
1036 		DEBUGOUT("Reset polling failed to complete.\n");
1037 	}
1038 
1039 	msec_delay(50);
1040 
1041 	/*
1042 	 * Double resets are required for recovery from certain error
1043 	 * conditions.  Between resets, it is necessary to stall to allow time
1044 	 * for any pending HW events to complete.
1045 	 */
1046 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1047 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1048 		goto mac_reset_top;
1049 	}
1050 
1051 	/*
1052 	 * Store the original AUTOC/AUTOC2 values if they have not been
1053 	 * stored off yet.  Otherwise restore the stored original
1054 	 * values since the reset operation sets back to defaults.
1055 	 */
1056 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1057 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1058 	if (hw->mac.orig_link_settings_stored == FALSE) {
1059 		hw->mac.orig_autoc = autoc;
1060 		hw->mac.orig_autoc2 = autoc2;
1061 		hw->mac.orig_link_settings_stored = TRUE;
1062 	} else {
1063 		if (autoc != hw->mac.orig_autoc)
1064 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
1065 					IXGBE_AUTOC_AN_RESTART));
1066 
1067 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1068 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1069 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1070 			autoc2 |= (hw->mac.orig_autoc2 &
1071 				   IXGBE_AUTOC2_UPPER_MASK);
1072 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1073 		}
1074 	}
1075 
1076 	/* Store the permanent mac address */
1077 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1078 
1079 	/*
1080 	 * Store MAC address from RAR0, clear receive address registers, and
1081 	 * clear the multicast table.  Also reset num_rar_entries to 128,
1082 	 * since we modify this value when programming the SAN MAC address.
1083 	 */
1084 	hw->mac.num_rar_entries = 128;
1085 	hw->mac.ops.init_rx_addrs(hw);
1086 
1087 	/* Store the permanent SAN mac address */
1088 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1089 
1090 	/* Add the SAN MAC address to the RAR only if it's a valid address */
1091 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1092 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1093 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
1094 
1095 		/* Save the SAN MAC RAR index */
1096 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1097 
1098 		/* Reserve the last RAR for the SAN MAC address */
1099 		hw->mac.num_rar_entries--;
1100 	}
1101 
1102 	/* Store the alternative WWNN/WWPN prefix */
1103 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1104 				   &hw->mac.wwpn_prefix);
1105 
1106 reset_hw_out:
1107 	return status;
1108 }
1109 
1110 /**
1111  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1112  *  @hw: pointer to hardware structure
1113  **/
1114 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1115 {
1116 	int i;
1117 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1118 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1119 
1120 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1121 
1122 	/*
1123 	 * Before starting reinitialization process,
1124 	 * FDIRCMD.CMD must be zero.
1125 	 */
1126 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1127 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1128 		      IXGBE_FDIRCMD_CMD_MASK))
1129 			break;
1130 		usec_delay(10);
1131 	}
1132 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1133 		DEBUGOUT("Flow Director previous command isn't complete, "
1134 			 "aborting table re-initialization.\n");
1135 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1136 	}
1137 
1138 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1139 	IXGBE_WRITE_FLUSH(hw);
1140 	/*
1141 	 * 82599 adapters flow director init flow cannot be restarted,
1142 	 * Workaround 82599 silicon errata by performing the following steps
1143 	 * before re-writing the FDIRCTRL control register with the same value.
1144 	 * - write 1 to bit 8 of FDIRCMD register &
1145 	 * - write 0 to bit 8 of FDIRCMD register
1146 	 */
1147 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1148 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1149 			 IXGBE_FDIRCMD_CLEARHT));
1150 	IXGBE_WRITE_FLUSH(hw);
1151 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1152 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1153 			 ~IXGBE_FDIRCMD_CLEARHT));
1154 	IXGBE_WRITE_FLUSH(hw);
1155 	/*
1156 	 * Clear FDIR Hash register to clear any leftover hashes
1157 	 * waiting to be programmed.
1158 	 */
1159 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1160 	IXGBE_WRITE_FLUSH(hw);
1161 
1162 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1163 	IXGBE_WRITE_FLUSH(hw);
1164 
1165 	/* Poll init-done after we write FDIRCTRL register */
1166 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1167 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1168 				   IXGBE_FDIRCTRL_INIT_DONE)
1169 			break;
1170 		usec_delay(10);
1171 	}
1172 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1173 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1174 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1175 	}
1176 
1177 	/* Clear FDIR statistics registers (read to clear) */
1178 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1179 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1180 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1181 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1182 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1183 
1184 	return IXGBE_SUCCESS;
1185 }
1186 
1187 /**
1188  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1189  *  @hw: pointer to hardware structure
1190  *  @fdirctrl: value to write to flow director control register
1191  **/
1192 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1193 {
1194 	int i;
1195 
1196 	DEBUGFUNC("ixgbe_fdir_enable_82599");
1197 
1198 	/* Prime the keys for hashing */
1199 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1200 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1201 
1202 	/*
1203 	 * Poll init-done after we write the register.  Estimated times:
1204 	 *      10G: PBALLOC = 11b, timing is 60us
1205 	 *       1G: PBALLOC = 11b, timing is 600us
1206 	 *     100M: PBALLOC = 11b, timing is 6ms
1207 	 *
1208 	 *     Multiple these timings by 4 if under full Rx load
1209 	 *
1210 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1211 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1212 	 * this might not finish in our poll time, but we can live with that
1213 	 * for now.
1214 	 */
1215 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1216 	IXGBE_WRITE_FLUSH(hw);
1217 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1218 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1219 				   IXGBE_FDIRCTRL_INIT_DONE)
1220 			break;
1221 		msec_delay(1);
1222 	}
1223 
1224 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1225 		DEBUGOUT("Flow Director poll time exceeded!\n");
1226 }
1227 
1228 /**
1229  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1230  *  @hw: pointer to hardware structure
1231  *  @fdirctrl: value to write to flow director control register, initially
1232  *	     contains just the value of the Rx packet buffer allocation
1233  **/
1234 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1235 {
1236 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1237 
1238 	/*
1239 	 * Continue setup of fdirctrl register bits:
1240 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1241 	 *  Set the maximum length per hash bucket to 0xA filters
1242 	 *  Send interrupt when 64 filters are left
1243 	 */
1244 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1245 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1246 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1247 
1248 	/* write hashes and fdirctrl register, poll for completion */
1249 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1250 
1251 	return IXGBE_SUCCESS;
1252 }
1253 
1254 /**
1255  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1256  *  @hw: pointer to hardware structure
1257  *  @fdirctrl: value to write to flow director control register, initially
1258  *	     contains just the value of the Rx packet buffer allocation
1259  **/
1260 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1261 {
1262 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1263 
1264 	/*
1265 	 * Continue setup of fdirctrl register bits:
1266 	 *  Turn perfect match filtering on
1267 	 *  Report hash in RSS field of Rx wb descriptor
1268 	 *  Initialize the drop queue
1269 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1270 	 *  Set the maximum length per hash bucket to 0xA filters
1271 	 *  Send interrupt when 64 (0x4 * 16) filters are left
1272 	 */
1273 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1274 		    IXGBE_FDIRCTRL_REPORT_STATUS |
1275 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1276 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1277 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1278 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1279 
1280 	/* write hashes and fdirctrl register, poll for completion */
1281 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1282 
1283 	return IXGBE_SUCCESS;
1284 }
1285 
1286 /*
1287  * These defines allow us to quickly generate all of the necessary instructions
1288  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1289  * for values 0 through 15
1290  */
1291 #define IXGBE_ATR_COMMON_HASH_KEY \
1292 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1293 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1294 do { \
1295 	u32 n = (_n); \
1296 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1297 		common_hash ^= lo_hash_dword >> n; \
1298 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1299 		bucket_hash ^= lo_hash_dword >> n; \
1300 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1301 		sig_hash ^= lo_hash_dword << (16 - n); \
1302 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1303 		common_hash ^= hi_hash_dword >> n; \
1304 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1305 		bucket_hash ^= hi_hash_dword >> n; \
1306 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1307 		sig_hash ^= hi_hash_dword << (16 - n); \
1308 } while (0);
1309 
1310 /**
1311  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1312  *  @stream: input bitstream to compute the hash on
1313  *
1314  *  This function is almost identical to the function above but contains
1315  *  several optomizations such as unwinding all of the loops, letting the
1316  *  compiler work out all of the conditional ifs since the keys are static
1317  *  defines, and computing two keys at once since the hashed dword stream
1318  *  will be the same for both keys.
1319  **/
1320 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1321 				     union ixgbe_atr_hash_dword common)
1322 {
1323 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1324 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1325 
1326 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1327 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
1328 
1329 	/* generate common hash dword */
1330 	hi_hash_dword = IXGBE_NTOHL(common.dword);
1331 
1332 	/* low dword is word swapped version of common */
1333 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1334 
1335 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1336 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1337 
1338 	/* Process bits 0 and 16 */
1339 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1340 
1341 	/*
1342 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1343 	 * delay this because bit 0 of the stream should not be processed
1344 	 * so we do not add the vlan until after bit 0 was processed
1345 	 */
1346 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1347 
1348 	/* Process remaining 30 bit of the key */
1349 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1350 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1351 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1352 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1353 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1354 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1355 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1356 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1357 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1358 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1359 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1360 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1361 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1362 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1363 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1364 
1365 	/* combine common_hash result with signature and bucket hashes */
1366 	bucket_hash ^= common_hash;
1367 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1368 
1369 	sig_hash ^= common_hash << 16;
1370 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1371 
1372 	/* return completed signature hash */
1373 	return sig_hash ^ bucket_hash;
1374 }
1375 
1376 /**
1377  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1378  *  @hw: pointer to hardware structure
1379  *  @input: unique input dword
1380  *  @common: compressed common input dword
1381  *  @queue: queue index to direct traffic to
1382  **/
1383 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1384 					  union ixgbe_atr_hash_dword input,
1385 					  union ixgbe_atr_hash_dword common,
1386 					  u8 queue)
1387 {
1388 	u64  fdirhashcmd;
1389 	u32  fdircmd;
1390 
1391 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1392 
1393 	/*
1394 	 * Get the flow_type in order to program FDIRCMD properly
1395 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1396 	 */
1397 	switch (input.formatted.flow_type) {
1398 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1399 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1400 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1401 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1402 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1403 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1404 		break;
1405 	default:
1406 		DEBUGOUT(" Error on flow type input\n");
1407 		return IXGBE_ERR_CONFIG;
1408 	}
1409 
1410 	/* configure FDIRCMD register */
1411 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1412 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1413 	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1414 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1415 
1416 	/*
1417 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1418 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1419 	 */
1420 	fdirhashcmd = (u64)fdircmd << 32;
1421 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1422 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1423 
1424 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1425 
1426 	return IXGBE_SUCCESS;
1427 }
1428 
/*
 * Process bit n (and bit n + 16) of the bucket hash key against the low and
 * high hash dwords.  Relies on lo_hash_dword, hi_hash_dword and bucket_hash
 * being in scope at the expansion site.  Wrapped in do/while(0) WITHOUT a
 * trailing semicolon so the expansion is exactly one statement: the caller
 * supplies the ';', keeping the macro safe in unbraced if/else bodies.
 * (The old form ended in "while (0);", which left a stray empty statement
 * behind and defeated the idiom.)
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
1437 
1438 /**
1439  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1440  *  @atr_input: input bitstream to compute the hash on
1441  *  @input_mask: mask for the input bitstream
1442  *
1443  *  This function serves two main purposes.  First it applys the input_mask
1444  *  to the atr_input resulting in a cleaned up atr_input data stream.
1445  *  Secondly it computes the hash and stores it in the bkt_hash field at
1446  *  the end of the input byte stream.  This way it will be available for
1447  *  future use without needing to recompute the hash.
1448  **/
1449 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1450 					  union ixgbe_atr_input *input_mask)
1451 {
1452 
1453 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1454 	u32 bucket_hash = 0;
1455 
1456 	/* Apply masks to input data */
1457 	input->dword_stream[0]  &= input_mask->dword_stream[0];
1458 	input->dword_stream[1]  &= input_mask->dword_stream[1];
1459 	input->dword_stream[2]  &= input_mask->dword_stream[2];
1460 	input->dword_stream[3]  &= input_mask->dword_stream[3];
1461 	input->dword_stream[4]  &= input_mask->dword_stream[4];
1462 	input->dword_stream[5]  &= input_mask->dword_stream[5];
1463 	input->dword_stream[6]  &= input_mask->dword_stream[6];
1464 	input->dword_stream[7]  &= input_mask->dword_stream[7];
1465 	input->dword_stream[8]  &= input_mask->dword_stream[8];
1466 	input->dword_stream[9]  &= input_mask->dword_stream[9];
1467 	input->dword_stream[10] &= input_mask->dword_stream[10];
1468 
1469 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1470 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1471 
1472 	/* generate common hash dword */
1473 	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1474 				    input->dword_stream[2] ^
1475 				    input->dword_stream[3] ^
1476 				    input->dword_stream[4] ^
1477 				    input->dword_stream[5] ^
1478 				    input->dword_stream[6] ^
1479 				    input->dword_stream[7] ^
1480 				    input->dword_stream[8] ^
1481 				    input->dword_stream[9] ^
1482 				    input->dword_stream[10]);
1483 
1484 	/* low dword is word swapped version of common */
1485 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1486 
1487 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1488 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1489 
1490 	/* Process bits 0 and 16 */
1491 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1492 
1493 	/*
1494 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1495 	 * delay this because bit 0 of the stream should not be processed
1496 	 * so we do not add the vlan until after bit 0 was processed
1497 	 */
1498 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1499 
1500 	/* Process remaining 30 bit of the key */
1501 	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1502 	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1503 	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1504 	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1505 	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1506 	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1507 	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1508 	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1509 	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1510 	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1511 	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1512 	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1513 	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1514 	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1515 	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1516 
1517 	/*
1518 	 * Limit hash to 13 bits since max bucket count is 8K.
1519 	 * Store result at the end of the input stream.
1520 	 */
1521 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1522 }
1523 
1524 /**
1525  *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1526  *  @input_mask: mask to be bit swapped
1527  *
1528  *  The source and destination port masks for flow director are bit swapped
1529  *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
1530  *  generate a correctly swapped value we need to bit swap the mask and that
1531  *  is what is accomplished by this function.
1532  **/
1533 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1534 {
1535 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1536 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1537 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1538 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1539 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1540 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1541 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1542 }
1543 
1544 /*
1545  * These two macros are meant to address the fact that we have registers
1546  * that are either all or in part big-endian.  As a result on big-endian
1547  * systems we will end up byte swapping the value to little-endian before
1548  * it is byte swapped again and written to the hardware in the original
1549  * big-endian format.
1550  */
1551 #define IXGBE_STORE_AS_BE32(_value) \
1552 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1553 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1554 
1555 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1556 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1557 
1558 #define IXGBE_STORE_AS_BE16(_value) \
1559 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1560 
/**
 *  ixgbe_fdir_set_input_mask_82599 - Program the Flow Director input masks
 *  @hw: pointer to hardware structure
 *  @input_mask: masks for the filter fields; a zero field is treated as
 *	     "fully masked" (wildcarded)
 *
 *  Programs FDIRM, the TCP/UDP port masks and the IPv4 address masks.
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_CONFIG when a field uses a partial
 *  mask this hardware cannot express.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* L4 type masked out: the ports must be fully masked too */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return IXGBE_SUCCESS;
}
1658 
/**
 *  ixgbe_fdir_write_perfect_filter_82599 - Write a perfect filter to hardware
 *  @hw: pointer to hardware structure
 *  @input: input bitstream (already masked, with bkt_hash computed)
 *  @soft_id: software index used to identify the filter
 *  @queue: queue index to direct traffic to
 *
 *  Programs the filter field registers, flushes them, then issues the
 *  add-flow command.  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register: bucket hash plus software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}
1717 
/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter from HW
 *  @hw: pointer to hardware structure
 *  @input: input bitstream whose bkt_hash identifies the filter
 *  @soft_id: software index of the filter to remove
 *
 *  Queries the hardware for the filter and, when it is present, issues the
 *  remove-flow command.  Returns IXGBE_ERR_FDIR_REINIT_FAILED if the query
 *  never completes, IXGBE_SUCCESS otherwise.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd = 0;
	u32 retry_count;
	s32 err = IXGBE_SUCCESS;

	/* configure FDIRHASH register: bucket hash plus software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	for (retry_count = 10; retry_count; retry_count--) {
		/* allow 10us for query to process */
		usec_delay(10);
		/* verify query completed successfully */
		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			break;
	}

	if (!retry_count)
		err = IXGBE_ERR_FDIR_REINIT_FAILED;

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return err;
}
1760 
1761 /**
1762  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1763  *  @hw: pointer to hardware structure
1764  *  @input: input bitstream
1765  *  @input_mask: mask for the input bitstream
1766  *  @soft_id: software index for the filters
1767  *  @queue: queue index to direct traffic to
1768  *
1769  *  Note that the caller to this function must lock before calling, since the
1770  *  hardware writes must be protected from one another.
1771  **/
1772 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1773 					union ixgbe_atr_input *input,
1774 					union ixgbe_atr_input *input_mask,
1775 					u16 soft_id, u8 queue)
1776 {
1777 	s32 err = IXGBE_ERR_CONFIG;
1778 
1779 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1780 
1781 	/*
1782 	 * Check flow_type formatting, and bail out before we touch the hardware
1783 	 * if there's a configuration issue
1784 	 */
1785 	switch (input->formatted.flow_type) {
1786 	case IXGBE_ATR_FLOW_TYPE_IPV4:
1787 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1788 		if (input->formatted.dst_port || input->formatted.src_port) {
1789 			DEBUGOUT(" Error on src/dst port\n");
1790 			return IXGBE_ERR_CONFIG;
1791 		}
1792 		break;
1793 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1794 		if (input->formatted.dst_port || input->formatted.src_port) {
1795 			DEBUGOUT(" Error on src/dst port\n");
1796 			return IXGBE_ERR_CONFIG;
1797 		}
1798 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1799 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1800 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1801 						  IXGBE_ATR_L4TYPE_MASK;
1802 		break;
1803 	default:
1804 		DEBUGOUT(" Error on flow type input\n");
1805 		return err;
1806 	}
1807 
1808 	/* program input mask into the HW */
1809 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1810 	if (err)
1811 		return err;
1812 
1813 	/* apply mask and compute/store hash */
1814 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1815 
1816 	/* program filters to filter memory */
1817 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
1818 						     soft_id, queue);
1819 }
1820 
1821 /**
1822  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1823  *  @hw: pointer to hardware structure
1824  *  @reg: analog register to read
1825  *  @val: read value
1826  *
1827  *  Performs read operation to Omer analog register specified.
1828  **/
1829 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1830 {
1831 	u32  core_ctl;
1832 
1833 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1834 
1835 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1836 			(reg << 8));
1837 	IXGBE_WRITE_FLUSH(hw);
1838 	usec_delay(10);
1839 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1840 	*val = (u8)core_ctl;
1841 
1842 	return IXGBE_SUCCESS;
1843 }
1844 
1845 /**
1846  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
1847  *  @hw: pointer to hardware structure
1848  *  @reg: atlas register to write
1849  *  @val: value to write
1850  *
1851  *  Performs write operation to Omer analog register specified.
1852  **/
1853 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1854 {
1855 	u32  core_ctl;
1856 
1857 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
1858 
1859 	core_ctl = (reg << 8) | val;
1860 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1861 	IXGBE_WRITE_FLUSH(hw);
1862 	usec_delay(10);
1863 
1864 	return IXGBE_SUCCESS;
1865 }
1866 
1867 /**
1868  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1869  *  @hw: pointer to hardware structure
1870  *
1871  *  Starts the hardware using the generic start_hw function
1872  *  and the generation start_hw function.
1873  *  Then performs revision-specific operations, if any.
1874  **/
1875 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1876 {
1877 	s32 ret_val = IXGBE_SUCCESS;
1878 
1879 	DEBUGFUNC("ixgbe_start_hw_82599");
1880 
1881 	ret_val = ixgbe_start_hw_generic(hw);
1882 	if (ret_val != IXGBE_SUCCESS)
1883 		goto out;
1884 
1885 	ret_val = ixgbe_start_hw_gen2(hw);
1886 	if (ret_val != IXGBE_SUCCESS)
1887 		goto out;
1888 
1889 	/* We need to run link autotry after the driver loads */
1890 	hw->mac.autotry_restart = TRUE;
1891 
1892 	if (ret_val == IXGBE_SUCCESS)
1893 		ret_val = ixgbe_verify_fw_version_82599(hw);
1894 out:
1895 	return ret_val;
1896 }
1897 
1898 /**
1899  *  ixgbe_identify_phy_82599 - Get physical layer module
1900  *  @hw: pointer to hardware structure
1901  *
1902  *  Determines the physical layer module found on the current adapter.
1903  *  If PHY already detected, maintains current PHY type in hw struct,
1904  *  otherwise executes the PHY detection routine.
1905  **/
1906 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1907 {
1908 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1909 
1910 	DEBUGFUNC("ixgbe_identify_phy_82599");
1911 
1912 	/* Detect PHY if not unknown - returns success if already detected. */
1913 	status = ixgbe_identify_phy_generic(hw);
1914 	if (status != IXGBE_SUCCESS) {
1915 		/* 82599 10GBASE-T requires an external PHY */
1916 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1917 			goto out;
1918 		else
1919 			status = ixgbe_identify_module_generic(hw);
1920 	}
1921 
1922 	/* Set PHY type none if no PHY detected */
1923 	if (hw->phy.type == ixgbe_phy_unknown) {
1924 		hw->phy.type = ixgbe_phy_none;
1925 		status = IXGBE_SUCCESS;
1926 	}
1927 
1928 	/* Return error if SFP module has been detected but is not supported */
1929 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1930 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1931 
1932 out:
1933 	return status;
1934 }
1935 
1936 /**
1937  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
1938  *  @hw: pointer to hardware structure
1939  *
1940  *  Determines physical layer capabilities of the current configuration.
1941  **/
1942 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1943 {
1944 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1945 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1946 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1947 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1948 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1949 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1950 	u16 ext_ability = 0;
1951 	u8 comp_codes_10g = 0;
1952 	u8 comp_codes_1g = 0;
1953 
1954 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
1955 
1956 	hw->phy.ops.identify(hw);
1957 
1958 	switch (hw->phy.type) {
1959 	case ixgbe_phy_tn:
1960 	case ixgbe_phy_cu_unknown:
1961 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1962 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1963 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1964 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1965 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1966 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1967 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1968 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1969 		goto out;
1970 	default:
1971 		break;
1972 	}
1973 
1974 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1975 	case IXGBE_AUTOC_LMS_1G_AN:
1976 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1977 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
1978 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
1979 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1980 			goto out;
1981 		} else
1982 			/* SFI mode so read SFP module */
1983 			goto sfp_check;
1984 		break;
1985 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1986 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
1987 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1988 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
1989 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1990 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
1991 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
1992 		goto out;
1993 		break;
1994 	case IXGBE_AUTOC_LMS_10G_SERIAL:
1995 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
1996 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
1997 			goto out;
1998 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
1999 			goto sfp_check;
2000 		break;
2001 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2002 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2003 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2004 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2005 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2006 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2007 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2008 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2009 		goto out;
2010 		break;
2011 	default:
2012 		goto out;
2013 		break;
2014 	}
2015 
2016 sfp_check:
2017 	/* SFP check must be done last since DA modules are sometimes used to
2018 	 * test KR mode -  we need to id KR mode correctly before SFP module.
2019 	 * Call identify_sfp because the pluggable module may have changed */
2020 	hw->phy.ops.identify_sfp(hw);
2021 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2022 		goto out;
2023 
2024 	switch (hw->phy.type) {
2025 	case ixgbe_phy_sfp_passive_tyco:
2026 	case ixgbe_phy_sfp_passive_unknown:
2027 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2028 		break;
2029 	case ixgbe_phy_sfp_ftl_active:
2030 	case ixgbe_phy_sfp_active_unknown:
2031 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2032 		break;
2033 	case ixgbe_phy_sfp_avago:
2034 	case ixgbe_phy_sfp_ftl:
2035 	case ixgbe_phy_sfp_intel:
2036 	case ixgbe_phy_sfp_unknown:
2037 		hw->phy.ops.read_i2c_eeprom(hw,
2038 		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2039 		hw->phy.ops.read_i2c_eeprom(hw,
2040 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2041 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2042 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2043 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2044 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2045 		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2046 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2047 		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2048 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2049 		break;
2050 	default:
2051 		break;
2052 	}
2053 
2054 out:
2055 	return physical_layer;
2056 }
2057 
2058 /**
2059  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2060  *  @hw: pointer to hardware structure
2061  *  @regval: register value to write to RXCTRL
2062  *
2063  *  Enables the Rx DMA unit for 82599
2064  **/
2065 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2066 {
2067 
2068 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2069 
2070 	/*
2071 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2072 	 * If traffic is incoming before we enable the Rx unit, it could hang
2073 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2074 	 * completely disabled prior to enabling the Rx unit.
2075 	 */
2076 
2077 	hw->mac.ops.disable_sec_rx_path(hw);
2078 
2079 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2080 
2081 	hw->mac.ops.enable_sec_rx_path(hw);
2082 
2083 	return IXGBE_SUCCESS;
2084 }
2085 
2086 /**
2087  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
2088  *  @hw: pointer to hardware structure
2089  *
2090  *  Verifies that installed the firmware version is 0.6 or higher
2091  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2092  *
2093  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2094  *  if the FW version is not supported.
2095  **/
2096 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2097 {
2098 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2099 	u16 fw_offset, fw_ptp_cfg_offset;
2100 	u16 fw_version = 0;
2101 
2102 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2103 
2104 	/* firmware check is only necessary for SFI devices */
2105 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2106 		status = IXGBE_SUCCESS;
2107 		goto fw_version_out;
2108 	}
2109 
2110 	/* get the offset to the Firmware Module block */
2111 	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2112 
2113 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2114 		goto fw_version_out;
2115 
2116 	/* get the offset to the Pass Through Patch Configuration block */
2117 	hw->eeprom.ops.read(hw, (fw_offset +
2118 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2119 				 &fw_ptp_cfg_offset);
2120 
2121 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2122 		goto fw_version_out;
2123 
2124 	/* get the firmware version */
2125 	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2126 			    IXGBE_FW_PATCH_VERSION_4), &fw_version);
2127 
2128 	if (fw_version > 0x5)
2129 		status = IXGBE_SUCCESS;
2130 
2131 fw_version_out:
2132 	return status;
2133 }
2134 
2135 /**
2136  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2137  *  @hw: pointer to hardware structure
2138  *
2139  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
2140  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2141  **/
2142 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2143 {
2144 	bool lesm_enabled = FALSE;
2145 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2146 	s32 status;
2147 
2148 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2149 
2150 	/* get the offset to the Firmware Module block */
2151 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2152 
2153 	if ((status != IXGBE_SUCCESS) ||
2154 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
2155 		goto out;
2156 
2157 	/* get the offset to the LESM Parameters block */
2158 	status = hw->eeprom.ops.read(hw, (fw_offset +
2159 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2160 				     &fw_lesm_param_offset);
2161 
2162 	if ((status != IXGBE_SUCCESS) ||
2163 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2164 		goto out;
2165 
2166 	/* get the lesm state word */
2167 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2168 				     IXGBE_FW_LESM_STATE_1),
2169 				     &fw_lesm_state);
2170 
2171 	if ((status == IXGBE_SUCCESS) &&
2172 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2173 		lesm_enabled = TRUE;
2174 
2175 out:
2176 	return lesm_enabled;
2177 }
2178 
2179 /**
2180  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2181  *  fastest available method
2182  *
2183  *  @hw: pointer to hardware structure
2184  *  @offset: offset of  word in EEPROM to read
2185  *  @words: number of words
2186  *  @data: word(s) read from the EEPROM
2187  *
2188  *  Retrieves 16 bit word(s) read from EEPROM
2189  **/
2190 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2191 					  u16 words, u16 *data)
2192 {
2193 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2194 	s32 ret_val = IXGBE_ERR_CONFIG;
2195 
2196 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2197 
2198 	/*
2199 	 * If EEPROM is detected and can be addressed using 14 bits,
2200 	 * use EERD otherwise use bit bang
2201 	 */
2202 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2203 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2204 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2205 							 data);
2206 	else
2207 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2208 								    words,
2209 								    data);
2210 
2211 	return ret_val;
2212 }
2213 
2214 /**
2215  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
2216  *  fastest available method
2217  *
2218  *  @hw: pointer to hardware structure
2219  *  @offset: offset of  word in the EEPROM to read
2220  *  @data: word read from the EEPROM
2221  *
2222  *  Reads a 16 bit word from the EEPROM
2223  **/
2224 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2225 				   u16 offset, u16 *data)
2226 {
2227 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2228 	s32 ret_val = IXGBE_ERR_CONFIG;
2229 
2230 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2231 
2232 	/*
2233 	 * If EEPROM is detected and can be addressed using 14 bits,
2234 	 * use EERD otherwise use bit bang
2235 	 */
2236 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2237 	    (offset <= IXGBE_EERD_MAX_ADDR))
2238 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2239 	else
2240 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2241 
2242 	return ret_val;
2243 }
2244 
2245 
2246