xref: /freebsd/sys/dev/ixgbe/ixgbe_82599.c (revision 55bce0c1203e70d8b62a3dedc9235ab39660c6f4)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2013, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 
41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
42 					 ixgbe_link_speed speed,
43 					 bool autoneg_wait_to_complete);
44 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
45 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
46 				   u16 offset, u16 *data);
47 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
48 					  u16 words, u16 *data);
49 
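/**
 *  ixgbe_mng_enabled - Is the manageability engine enabled?
 *  @hw: pointer to hardware structure
 *
 *  Returns TRUE only when firmware is running in pass-through mode (FWSM),
 *  receive of manageability (TCO) traffic is enabled (MANC), and
 *  manageability clock gating is not active (FACTPS).
 **/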
50 static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
51 {
52 	u32 fwsm, manc, factps;
53 
54 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
55 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
56 		return FALSE;
57 
58 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
59 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
60 		return FALSE;
61 
62 	factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
63 	if (factps & IXGBE_FACTPS_MNGCG)
64 		return FALSE;
65 
66 	return TRUE;
67 }
68 
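/**
 *  ixgbe_init_mac_link_ops_82599 - Set up MAC link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Assigns the Tx laser control handlers for SFP+ fiber when manageability
 *  is not enabled, and selects the setup_link handler: multispeed fiber,
 *  SmartSpeed (backplane without LESM firmware), or the default 82599 path.
 **/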
69 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
70 {
71 	struct ixgbe_mac_info *mac = &hw->mac;
72 
73 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
74 
75 	/*
76 	 * Enable the laser control functions for SFP+ fiber
77 	 * when MNG is not enabled.
78 	 */
79 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
80 	    !(ixgbe_mng_enabled(hw))) {
81 		mac->ops.disable_tx_laser =
82 				       &ixgbe_disable_tx_laser_multispeed_fiber;
83 		mac->ops.enable_tx_laser =
84 					&ixgbe_enable_tx_laser_multispeed_fiber;
85 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
86 
87 	} else {
88 		mac->ops.disable_tx_laser = NULL;
89 		mac->ops.enable_tx_laser = NULL;
90 		mac->ops.flap_tx_laser = NULL;
91 	}
92 
93 	if (hw->phy.multispeed_fiber) {
94 		/* Set up dual speed SFP+ support */
95 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
96 	} else {
97 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
98 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
99 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
100 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
101 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
102 		} else {
103 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
104 		}
105 	}
106 }
107 
108 /**
109  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
110  *  @hw: pointer to hardware structure
111  *
112  *  Initialize any function pointers that were not able to be
113  *  set during init_shared_code because the PHY/SFP type was
114  *  not known.  Perform the SFP init if necessary.
115  *
116  **/
117 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
118 {
119 	struct ixgbe_mac_info *mac = &hw->mac;
120 	struct ixgbe_phy_info *phy = &hw->phy;
121 	s32 ret_val = IXGBE_SUCCESS;
122 
123 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
124 
125 	/* Identify the PHY or SFP module */
126 	ret_val = phy->ops.identify(hw);
127 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
128 		goto init_phy_ops_out;
129 
130 	/* Setup function pointers based on detected SFP module and speeds */
131 	ixgbe_init_mac_link_ops_82599(hw);
132 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
133 		hw->phy.ops.reset = NULL;
134 
135 	/* If copper media, overwrite with copper function pointers */
136 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
137 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
138 		mac->ops.get_link_capabilities =
139 				  &ixgbe_get_copper_link_capabilities_generic;
140 	}
141 
142 	/* Set necessary function pointers based on phy type */
143 	switch (hw->phy.type) {
144 	case ixgbe_phy_tn:
145 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
146 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
147 		phy->ops.get_firmware_version =
148 			     &ixgbe_get_phy_firmware_version_tnx;
149 		break;
150 	default:
151 		break;
152 	}
153 init_phy_ops_out:
154 	return ret_val;
155 }
156 
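/**
 *  ixgbe_setup_sfp_modules_82599 - Set up SFP+ module
 *  @hw: pointer to hardware structure
 *
 *  If an SFP+ module was identified, plays the EEPROM-resident init
 *  sequence out to the CORECTL register, then restarts the DSP by putting
 *  AUTOC into 10G SFI mode and resetting the pipeline.
 **/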
157 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
158 {
159 	s32 ret_val = IXGBE_SUCCESS;
160 	u16 list_offset, data_offset, data_value;
161 	bool got_lock = FALSE;
162 
163 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
164 
165 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
166 		ixgbe_init_mac_link_ops_82599(hw);
167 
168 		hw->phy.ops.reset = NULL;
169 
170 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
171 							      &data_offset);
172 		if (ret_val != IXGBE_SUCCESS)
173 			goto setup_sfp_out;
174 
175 		/* PHY config will finish before releasing the semaphore */
176 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
177 							IXGBE_GSSR_MAC_CSR_SM);
178 		if (ret_val != IXGBE_SUCCESS) {
179 			ret_val = IXGBE_ERR_SWFW_SYNC;
180 			goto setup_sfp_out;
181 		}
182 
183 		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
184 		while (data_value != 0xffff) {
185 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
186 			IXGBE_WRITE_FLUSH(hw);
187 			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
188 		}
189 
190 		/* Release the semaphore */
191 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
192 		/* Delay obtaining semaphore again to allow FW access */
193 		msec_delay(hw->eeprom.semaphore_delay);
194 
195 		/* Need SW/FW semaphore around AUTOC writes if LESM on,
196 		 * likewise reset_pipeline requires lock as it also writes
197 		 * AUTOC.
198 		 */
199 		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
200 			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
201 							IXGBE_GSSR_MAC_CSR_SM);
202 			if (ret_val != IXGBE_SUCCESS) {
203 				ret_val = IXGBE_ERR_SWFW_SYNC;
204 				goto setup_sfp_out;
205 			}
206 
207 			got_lock = TRUE;
208 		}
209 
210 		/* Restart DSP and set SFI mode */
211 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
212 				IXGBE_AUTOC_LMS_10G_SERIAL));
213 		hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
214 		ret_val = ixgbe_reset_pipeline_82599(hw);
215 
216 		if (got_lock) {
217 			hw->mac.ops.release_swfw_sync(hw,
218 						      IXGBE_GSSR_MAC_CSR_SM);
219 			got_lock = FALSE;
220 		}
221 
222 		if (ret_val) {
223 			DEBUGOUT("sfp module setup not complete\n");
224 			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
225 			goto setup_sfp_out;
226 		}
227 
228 	}
229 
230 setup_sfp_out:
231 	return ret_val;
232 }
233 
234 /**
235  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
236  *  @hw: pointer to hardware structure
237  *
238  *  Initialize the function pointers and assign the MAC type for 82599.
239  *  Does not touch the hardware.
240  **/
241 
242 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
243 {
244 	struct ixgbe_mac_info *mac = &hw->mac;
245 	struct ixgbe_phy_info *phy = &hw->phy;
246 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
247 	s32 ret_val;
248 
249 	DEBUGFUNC("ixgbe_init_ops_82599");
250 
251 	ixgbe_init_phy_ops_generic(hw);
252 	ret_val = ixgbe_init_ops_generic(hw);
253 
254 	/* PHY */
255 	phy->ops.identify = &ixgbe_identify_phy_82599;
256 	phy->ops.init = &ixgbe_init_phy_ops_82599;
257 
258 	/* MAC */
259 	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
260 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
261 	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
262 	mac->ops.get_supported_physical_layer =
263 				    &ixgbe_get_supported_physical_layer_82599;
264 	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
265 	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
266 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
267 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
268 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
269 	mac->ops.start_hw = &ixgbe_start_hw_82599;
270 	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
271 	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
272 	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
273 	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
274 	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
275 
276 	/* RAR, Multicast, VLAN */
277 	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
278 	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
279 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
280 	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
281 	mac->rar_highwater = 1;
282 	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
283 	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
284 	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
285 	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
286 	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
287 	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
288 	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
289 
290 	/* Link */
291 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
292 	mac->ops.check_link = &ixgbe_check_mac_link_generic;
293 	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
294 	ixgbe_init_mac_link_ops_82599(hw);
295 
296 	mac->mcft_size		= 128;
297 	mac->vft_size		= 128;
298 	mac->num_rar_entries	= 128;
299 	mac->rx_pb_size		= 512;
300 	mac->max_tx_queues	= 128;
301 	mac->max_rx_queues	= 128;
302 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
303 
304 	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
305 				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
306 
307 	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
308 
309 	/* EEPROM */
310 	eeprom->ops.read = &ixgbe_read_eeprom_82599;
311 	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
312 
313 	/* Manageability interface */
314 	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
315 
316 
317 	return ret_val;
318 }
319 
320 /**
321  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
322  *  @hw: pointer to hardware structure
323  *  @speed: pointer to link speed
324  *  @autoneg: TRUE when autoneg or autotry is enabled
325  *
326  *  Determines the link capabilities by reading the AUTOC register.
327  **/
328 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
329 				      ixgbe_link_speed *speed,
330 				      bool *autoneg)
331 {
332 	s32 status = IXGBE_SUCCESS;
333 	u32 autoc = 0;
334 
335 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
336 
337 
338 	/* Check if 1G SFP module. */
339 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
340 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
341 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
342 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
343 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
344 		*autoneg = TRUE;
345 		goto out;
346 	}
347 
348 	/*
349 	 * Determine link capabilities based on the stored value of AUTOC,
350 	 * which represents EEPROM defaults.  If AUTOC value has not
351 	 * been stored, use the current register values.
352 	 */
353 	if (hw->mac.orig_link_settings_stored)
354 		autoc = hw->mac.orig_autoc;
355 	else
356 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
357 
358 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
359 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
360 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
361 		*autoneg = FALSE;
362 		break;
363 
364 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
365 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
366 		*autoneg = FALSE;
367 		break;
368 
369 	case IXGBE_AUTOC_LMS_1G_AN:
370 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
371 		*autoneg = TRUE;
372 		break;
373 
374 	case IXGBE_AUTOC_LMS_10G_SERIAL:
375 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
376 		*autoneg = FALSE;
377 		break;
378 
379 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
380 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
381 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
382 		if (autoc & IXGBE_AUTOC_KR_SUPP)
383 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
384 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
385 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
386 		if (autoc & IXGBE_AUTOC_KX_SUPP)
387 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
388 		*autoneg = TRUE;
389 		break;
390 
391 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
392 		*speed = IXGBE_LINK_SPEED_100_FULL;
393 		if (autoc & IXGBE_AUTOC_KR_SUPP)
394 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
395 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
396 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
397 		if (autoc & IXGBE_AUTOC_KX_SUPP)
398 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
399 		*autoneg = TRUE;
400 		break;
401 
402 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
403 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
404 		*autoneg = FALSE;
405 		break;
406 
407 	default:
408 		status = IXGBE_ERR_LINK_SETUP;
409 		goto out;
410 		break;
411 	}
412 
413 	if (hw->phy.multispeed_fiber) {
414 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
415 			  IXGBE_LINK_SPEED_1GB_FULL;
416 		*autoneg = TRUE;
417 	}
418 
419 out:
420 	return status;
421 }
422 
423 /**
424  *  ixgbe_get_media_type_82599 - Get media type
425  *  @hw: pointer to hardware structure
426  *
427  *  Returns the media type (fiber, copper, backplane)
428  **/
429 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
430 {
431 	enum ixgbe_media_type media_type;
432 
433 	DEBUGFUNC("ixgbe_get_media_type_82599");
434 
435 	/* Detect if there is a copper PHY attached. */
436 	switch (hw->phy.type) {
437 	case ixgbe_phy_cu_unknown:
438 	case ixgbe_phy_tn:
439 		media_type = ixgbe_media_type_copper;
440 		goto out;
441 	default:
442 		break;
443 	}
444 
445 	switch (hw->device_id) {
446 	case IXGBE_DEV_ID_82599_KX4:
447 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
448 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
449 	case IXGBE_DEV_ID_82599_KR:
450 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
451 	case IXGBE_DEV_ID_82599_XAUI_LOM:
452 		/* Default device ID is mezzanine card KX/KX4 */
453 		media_type = ixgbe_media_type_backplane;
454 		break;
455 	case IXGBE_DEV_ID_82599_SFP:
456 	case IXGBE_DEV_ID_82599_SFP_FCOE:
457 	case IXGBE_DEV_ID_82599_SFP_EM:
458 	case IXGBE_DEV_ID_82599_SFP_SF2:
459 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
460 	case IXGBE_DEV_ID_82599EN_SFP:
461 		media_type = ixgbe_media_type_fiber;
462 		break;
463 	case IXGBE_DEV_ID_82599_CX4:
464 		media_type = ixgbe_media_type_cx4;
465 		break;
466 	case IXGBE_DEV_ID_82599_T3_LOM:
467 		media_type = ixgbe_media_type_copper;
468 		break;
469 	case IXGBE_DEV_ID_82599_BYPASS:
470 		media_type = ixgbe_media_type_fiber_fixed;
471 		hw->phy.multispeed_fiber = TRUE;
472 		break;
473 	default:
474 		media_type = ixgbe_media_type_unknown;
475 		break;
476 	}
477 out:
478 	return media_type;
479 }
480 
481 /**
482  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
483  *  @hw: pointer to hardware structure
484  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
485  *
486  *  Configures link settings based on values in the ixgbe_hw struct.
487  *  Restarts the link.  Performs autonegotiation if needed.
488  **/
489 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
490 			       bool autoneg_wait_to_complete)
491 {
492 	u32 autoc_reg;
493 	u32 links_reg;
494 	u32 i;
495 	s32 status = IXGBE_SUCCESS;
496 	bool got_lock = FALSE;
497 
498 	DEBUGFUNC("ixgbe_start_mac_link_82599");
499 
500 
501 	/*  reset_pipeline requires us to hold this lock as it writes to
502 	 *  AUTOC.
503 	 */
504 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
505 		status = hw->mac.ops.acquire_swfw_sync(hw,
506 						       IXGBE_GSSR_MAC_CSR_SM);
507 		if (status != IXGBE_SUCCESS)
508 			goto out;
509 
510 		got_lock = TRUE;
511 	}
512 
513 	/* Restart link */
514 	ixgbe_reset_pipeline_82599(hw);
515 
516 	if (got_lock)
517 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
518 
519 	/* Only poll for autoneg to complete if specified to do so */
520 	if (autoneg_wait_to_complete) {
521 		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
522 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
523 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
524 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
525 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
526 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
527 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
528 			links_reg = 0; /* Just in case Autoneg time = 0 */
529 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
530 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
531 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
532 					break;
533 				msec_delay(100);
534 			}
535 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
536 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
537 				DEBUGOUT("Autoneg did not complete.\n");
538 			}
539 		}
540 	}
541 
542 	/* Add delay to filter out noise during initial link setup */
543 	msec_delay(50);
544 
545 out:
546 	return status;
547 }
548 
549 /**
550  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
551  *  @hw: pointer to hardware structure
552  *
553  *  The base drivers may require better control over SFP+ module
554  *  PHY states.  This includes selectively shutting down the Tx
555  *  laser on the PHY, effectively halting physical link.
556  **/
557 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
558 {
559 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
560 
561 	/* Disable tx laser; allow 100us to go dark per spec */
562 	esdp_reg |= IXGBE_ESDP_SDP3;
563 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
564 	IXGBE_WRITE_FLUSH(hw);
565 	usec_delay(100);
566 }
567 
568 /**
569  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
570  *  @hw: pointer to hardware structure
571  *
572  *  The base drivers may require better control over SFP+ module
573  *  PHY states.  This includes selectively turning on the Tx
574  *  laser on the PHY, effectively starting physical link.
575  **/
576 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
577 {
578 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
579 
580 	/* Enable tx laser; allow 100ms to light up */
581 	esdp_reg &= ~IXGBE_ESDP_SDP3;
582 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
583 	IXGBE_WRITE_FLUSH(hw);
584 	msec_delay(100);
585 }
586 
587 /**
588  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
589  *  @hw: pointer to hardware structure
590  *
591  *  When the driver changes the link speeds that it can support,
592  *  it sets autotry_restart to TRUE to indicate that we need to
593  *  initiate a new autotry session with the link partner.  To do
594  *  so, we set the speed then disable and re-enable the tx laser, to
595  *  alert the link partner that it also needs to restart autotry on its
596  *  end.  This is consistent with true clause 37 autoneg, which also
597  *  involves a loss of signal.
598  **/
599 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
600 {
601 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
602 
603 	if (hw->mac.autotry_restart) {
604 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
605 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
606 		hw->mac.autotry_restart = FALSE;
607 	}
608 }
609 
610 /**
611  *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
612  *  @hw: pointer to hardware structure
613  *  @speed: link speed to set
614  *
615  *  We set the module speed differently for fixed fiber.  Unlike the
616  *  other multi-speed paths, this helper has no error value to return,
617  *  so if we detect an error we just log it and exit.
618  */
619 static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
620 					ixgbe_link_speed speed)
621 {
622 	s32 status;
623 	u8 rs, eeprom_data;
624 
625 	switch (speed) {
626 	case IXGBE_LINK_SPEED_10GB_FULL:
627 		/* one bit mask same as setting on */
628 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
629 		break;
630 	case IXGBE_LINK_SPEED_1GB_FULL:
631 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
632 		break;
633 	default:
634 		DEBUGOUT("Invalid fixed module speed\n");
635 		return;
636 	}
637 
638 	/* Set RS0 */
639 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
640 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
641 					   &eeprom_data);
642 	if (status) {
643 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
644 		goto out;
645 	}
646 
647 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
648 
649 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
650 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
651 					    eeprom_data);
652 	if (status) {
653 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
654 		goto out;
655 	}
656 
657 	/* Set RS1 */
658 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
659 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
660 					   &eeprom_data);
661 	if (status) {
662 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
663 		goto out;
664 	}
665 
666 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
667 
668 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
669 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
670 					    eeprom_data);
671 	if (status) {
672 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
673 		goto out;
674 	}
675 out:
676 	return;
677 }
678 
679 /**
680  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
681  *  @hw: pointer to hardware structure
682  *  @speed: new link speed
683  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
684  *
685  *  Set the link speed in the AUTOC register and restarts link.
686  **/
687 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
688 				     ixgbe_link_speed speed,
689 				     bool autoneg_wait_to_complete)
690 {
691 	s32 status = IXGBE_SUCCESS;
692 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
693 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
694 	u32 speedcnt = 0;
695 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
696 	u32 i = 0;
697 	bool autoneg, link_up = FALSE;
698 
699 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
700 
701 	/* Mask off requested but non-supported speeds */
702 	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
703 	if (status != IXGBE_SUCCESS)
704 		return status;
705 
706 	speed &= link_speed;
707 
708 	/*
709 	 * Try each speed one by one, highest priority first.  We do this in
710 	 * software because 10gb fiber doesn't support speed autonegotiation.
711 	 */
712 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
713 		speedcnt++;
714 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
715 
716 		/* If we already have link at this speed, just jump out */
717 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
718 		if (status != IXGBE_SUCCESS)
719 			return status;
720 
721 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
722 			goto out;
723 
724 		/* Set the module link speed */
725 		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
726 			ixgbe_set_fiber_fixed_speed(hw,
727 						    IXGBE_LINK_SPEED_10GB_FULL);
728 		} else {
729 			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
730 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
731 			IXGBE_WRITE_FLUSH(hw);
732 		}
733 
734 		/* Allow module to change analog characteristics (1G->10G) */
735 		msec_delay(40);
736 
737 		status = ixgbe_setup_mac_link_82599(hw,
738 						    IXGBE_LINK_SPEED_10GB_FULL,
739 						    autoneg_wait_to_complete);
740 		if (status != IXGBE_SUCCESS)
741 			return status;
742 
743 		/* Flap the tx laser if it has not already been done */
744 		ixgbe_flap_tx_laser(hw);
745 
746 		/*
747 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
748 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
749 		 * attempted.  82599 uses the same timing for 10g SFI.
750 		 */
751 		for (i = 0; i < 5; i++) {
752 			/* Wait for the link partner to also set speed */
753 			msec_delay(100);
754 
755 			/* If we have link, just jump out */
756 			status = ixgbe_check_link(hw, &link_speed,
757 						  &link_up, FALSE);
758 			if (status != IXGBE_SUCCESS)
759 				return status;
760 
761 			if (link_up)
762 				goto out;
763 		}
764 	}
765 
766 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
767 		speedcnt++;
768 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
769 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
770 
771 		/* If we already have link at this speed, just jump out */
772 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
773 		if (status != IXGBE_SUCCESS)
774 			return status;
775 
776 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
777 			goto out;
778 
779 		/* Set the module link speed */
780 		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
781 			ixgbe_set_fiber_fixed_speed(hw,
782 						    IXGBE_LINK_SPEED_1GB_FULL);
783 		} else {
784 			esdp_reg &= ~IXGBE_ESDP_SDP5;
785 			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
786 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
787 			IXGBE_WRITE_FLUSH(hw);
788 		}
789 
790 		/* Allow module to change analog characteristics (10G->1G) */
791 		msec_delay(40);
792 
793 		status = ixgbe_setup_mac_link_82599(hw,
794 						    IXGBE_LINK_SPEED_1GB_FULL,
795 						    autoneg_wait_to_complete);
796 		if (status != IXGBE_SUCCESS)
797 			return status;
798 
799 		/* Flap the tx laser if it has not already been done */
800 		ixgbe_flap_tx_laser(hw);
801 
802 		/* Wait for the link partner to also set speed */
803 		msec_delay(100);
804 
805 		/* If we have link, just jump out */
806 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
807 		if (status != IXGBE_SUCCESS)
808 			return status;
809 
810 		if (link_up)
811 			goto out;
812 	}
813 
814 	/*
815 	 * We didn't get link.  Configure back to the highest speed we tried
816 	 * (if there was more than one).  We call ourselves back with just the
817 	 * single highest speed that the user requested.
818 	 */
819 	if (speedcnt > 1)
820 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
821 			highest_link_speed, autoneg_wait_to_complete);
822 
823 out:
824 	/* Set autoneg_advertised value based on input link speed */
825 	hw->phy.autoneg_advertised = 0;
826 
827 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
828 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
829 
830 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
831 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
832 
833 	return status;
834 }
835 
836 /**
837  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
838  *  @hw: pointer to hardware structure
839  *  @speed: new link speed
840  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
841  *
842  *  Implements the Intel SmartSpeed algorithm.
843  **/
844 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
845 				    ixgbe_link_speed speed,
846 				    bool autoneg_wait_to_complete)
847 {
848 	s32 status = IXGBE_SUCCESS;
849 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
850 	s32 i, j;
851 	bool link_up = FALSE;
852 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
853 
854 	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
855 
856 	 /* Set autoneg_advertised value based on input link speed */
857 	hw->phy.autoneg_advertised = 0;
858 
859 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
860 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
861 
862 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
863 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
864 
865 	if (speed & IXGBE_LINK_SPEED_100_FULL)
866 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
867 
868 	/*
869 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
870 	 * autoneg advertisement if link is unable to be established at the
871 	 * highest negotiated rate.  This can sometimes happen due to integrity
872 	 * issues with the physical media connection.
873 	 */
874 
875 	/* First, try to get link with full advertisement */
876 	hw->phy.smart_speed_active = FALSE;
877 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
878 		status = ixgbe_setup_mac_link_82599(hw, speed,
879 						    autoneg_wait_to_complete);
880 		if (status != IXGBE_SUCCESS)
881 			goto out;
882 
883 		/*
884 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
885 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
886 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
887 		 * Table 9 in the AN MAS.
888 		 */
889 		for (i = 0; i < 5; i++) {
890 			msec_delay(100);
891 
892 			/* If we have link, just jump out */
893 			status = ixgbe_check_link(hw, &link_speed, &link_up,
894 						  FALSE);
895 			if (status != IXGBE_SUCCESS)
896 				goto out;
897 
898 			if (link_up)
899 				goto out;
900 		}
901 	}
902 
903 	/*
904 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
905 	 * (or BX4/BX), then disable KR and try again.
906 	 */
907 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
908 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
909 		goto out;
910 
911 	/* Turn SmartSpeed on to disable KR support */
912 	hw->phy.smart_speed_active = TRUE;
913 	status = ixgbe_setup_mac_link_82599(hw, speed,
914 					    autoneg_wait_to_complete);
915 	if (status != IXGBE_SUCCESS)
916 		goto out;
917 
918 	/*
919 	 * the AN link_fail_inhibit_timer as well as for multiple cycles of
920 	 * the AN link_fail_inhibit_timer as well for multiple cycles of
921 	 * parallel detect, both 10g and 1g. This allows for the maximum
922 	 * connect attempts as defined in the AN MAS table 73-7.
923 	 */
924 	for (i = 0; i < 6; i++) {
925 		msec_delay(100);
926 
927 		/* If we have link, just jump out */
928 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
929 		if (status != IXGBE_SUCCESS)
930 			goto out;
931 
932 		if (link_up)
933 			goto out;
934 	}
935 
936 	/* We didn't get link.  Turn SmartSpeed back off. */
937 	hw->phy.smart_speed_active = FALSE;
938 	status = ixgbe_setup_mac_link_82599(hw, speed,
939 					    autoneg_wait_to_complete);
940 
941 out:
942 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
943 		DEBUGOUT("Smartspeed has downgraded the link speed "
944 		"from the maximum advertised\n");
945 	return status;
946 }
947 
948 /**
949  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
950  *  @hw: pointer to hardware structure
951  *  @speed: new link speed
952  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
953  *
954  *  Set the link speed in the AUTOC register and restarts link.
955  **/
956 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
957 			       ixgbe_link_speed speed,
958 			       bool autoneg_wait_to_complete)
959 {
960 	bool autoneg = FALSE;
961 	s32 status = IXGBE_SUCCESS;
962 	u32 autoc, pma_pmd_1g, link_mode, start_autoc;
963 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
964 	u32 orig_autoc = 0;
965 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
966 	u32 links_reg;
967 	u32 i;
968 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
969 	bool got_lock = FALSE;
970 
971 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
972 
973 	/* Check to see if speed passed in is supported. */
974 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
975 	if (status)
976 		goto out;
977 
978 	speed &= link_capabilities;
979 
980 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
981 		status = IXGBE_ERR_LINK_SETUP;
982 		goto out;
983 	}
984 
985 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
986 	if (hw->mac.orig_link_settings_stored)
987 		autoc = hw->mac.orig_autoc;
988 	else
989 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
990 
991 	orig_autoc = autoc;
992 	start_autoc = hw->mac.cached_autoc;
993 	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
994 	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
995 
996 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
997 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
998 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
999 		/* Set KX4/KX/KR support according to speed requested */
1000 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
1001 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
1002 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
1003 				autoc |= IXGBE_AUTOC_KX4_SUPP;
1004 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
1005 			    (hw->phy.smart_speed_active == FALSE))
1006 				autoc |= IXGBE_AUTOC_KR_SUPP;
1007 		}
1008 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1009 			autoc |= IXGBE_AUTOC_KX_SUPP;
1010 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
1011 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
1012 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
1013 		/* Switch from 1G SFI to 10G SFI if requested */
1014 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
1015 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
1016 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
1017 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
1018 		}
1019 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
1020 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
1021 		/* Switch from 10G SFI to 1G SFI if requested */
1022 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1023 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
1024 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
1025 			if (autoneg)
1026 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
1027 			else
1028 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
1029 		}
1030 	}
1031 
1032 	if (autoc != start_autoc) {
1033 		/* Need SW/FW semaphore around AUTOC writes if LESM is on,
1034 		 * likewise reset_pipeline requires us to hold this lock as
1035 		 * it also writes to AUTOC.
1036 		 */
1037 		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1038 			status = hw->mac.ops.acquire_swfw_sync(hw,
1039 							IXGBE_GSSR_MAC_CSR_SM);
1040 			if (status != IXGBE_SUCCESS) {
1041 				status = IXGBE_ERR_SWFW_SYNC;
1042 				goto out;
1043 			}
1044 
1045 			got_lock = TRUE;
1046 		}
1047 
1048 		/* Restart link */
1049 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
1050 		hw->mac.cached_autoc = autoc;
1051 		ixgbe_reset_pipeline_82599(hw);
1052 
1053 		if (got_lock) {
1054 			hw->mac.ops.release_swfw_sync(hw,
1055 						      IXGBE_GSSR_MAC_CSR_SM);
1056 			got_lock = FALSE;
1057 		}
1058 
1059 		/* Only poll for autoneg to complete if specified to do so */
1060 		if (autoneg_wait_to_complete) {
1061 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1062 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1063 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1064 				links_reg = 0; /*Just in case Autoneg time=0*/
1065 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
1066 					links_reg =
1067 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
1068 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
1069 						break;
1070 					msec_delay(100);
1071 				}
1072 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1073 					status =
1074 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1075 					DEBUGOUT("Autoneg did not complete.\n");
1076 				}
1077 			}
1078 		}
1079 
1080 		/* Add delay to filter out noise during initial link setup */
1081 		msec_delay(50);
1082 	}
1083 
1084 out:
1085 	return status;
1086 }
1087 
1088 /**
1089  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1090  *  @hw: pointer to hardware structure
1091  *  @speed: new link speed
1092  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
1093  *
1094  *  Restarts link on PHY and MAC based on settings passed in.
1095  **/
1096 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1097 					 ixgbe_link_speed speed,
1098 					 bool autoneg_wait_to_complete)
1099 {
1100 	s32 status;
1101 
1102 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
1103 
1104 	/* Setup the PHY according to input speed */
1105 	status = hw->phy.ops.setup_link_speed(hw, speed,
1106 					      autoneg_wait_to_complete);
1107 	/* Set up MAC */
1108 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1109 
1110 	return status;
1111 }
1112 
1113 /**
1114  *  ixgbe_reset_hw_82599 - Perform hardware reset
1115  *  @hw: pointer to hardware structure
1116  *
1117  *  Resets the hardware by resetting the transmit and receive units, masks
1118  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1119  *  reset.
1120  **/
1121 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1122 {
1123 	ixgbe_link_speed link_speed;
1124 	s32 status;
1125 	u32 ctrl, i, autoc, autoc2;
1126 	bool link_up = FALSE;
1127 
1128 	DEBUGFUNC("ixgbe_reset_hw_82599");
1129 
1130 	/* Call adapter stop to disable tx/rx and clear interrupts */
1131 	status = hw->mac.ops.stop_adapter(hw);
1132 	if (status != IXGBE_SUCCESS)
1133 		goto reset_hw_out;
1134 
1135 	/* flush pending Tx transactions */
1136 	ixgbe_clear_tx_pending(hw);
1137 
1138 	/* PHY ops must be identified and initialized prior to reset */
1139 
1140 	/* Identify PHY and related function pointers */
1141 	status = hw->phy.ops.init(hw);
1142 
1143 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1144 		goto reset_hw_out;
1145 
1146 	/* Setup SFP module if there is one present. */
1147 	if (hw->phy.sfp_setup_needed) {
1148 		status = hw->mac.ops.setup_sfp(hw);
1149 		hw->phy.sfp_setup_needed = FALSE;
1150 	}
1151 
1152 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1153 		goto reset_hw_out;
1154 
1155 	/* Reset PHY */
1156 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1157 		hw->phy.ops.reset(hw);
1158 
1159 mac_reset_top:
1160 	/*
1161 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
1162 	 * If link reset is used when link is up, it might reset the PHY when
1163 	 * mng is using it.  If link is down or the flag to force full link
1164 	 * reset is set, then perform link reset.
1165 	 */
1166 	ctrl = IXGBE_CTRL_LNK_RST;
1167 	if (!hw->force_full_reset) {
1168 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1169 		if (link_up)
1170 			ctrl = IXGBE_CTRL_RST;
1171 	}
1172 
1173 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1174 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1175 	IXGBE_WRITE_FLUSH(hw);
1176 
1177 	/* Poll for reset bit to self-clear indicating reset is complete */
1178 	for (i = 0; i < 10; i++) {
1179 		usec_delay(1);
1180 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1181 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
1182 			break;
1183 	}
1184 
1185 	if (ctrl & IXGBE_CTRL_RST_MASK) {
1186 		status = IXGBE_ERR_RESET_FAILED;
1187 		DEBUGOUT("Reset polling failed to complete.\n");
1188 	}
1189 
1190 	msec_delay(50);
1191 
1192 	/*
1193 	 * Double resets are required for recovery from certain error
1194 	 * conditions.  Between resets, it is necessary to stall to allow time
1195 	 * for any pending HW events to complete.
1196 	 */
1197 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1198 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1199 		goto mac_reset_top;
1200 	}
1201 
1202 	/*
1203 	 * Store the original AUTOC/AUTOC2 values if they have not been
1204 	 * stored off yet.  Otherwise restore the stored original
1205 	 * values since the reset operation sets back to defaults.
1206 	 */
1207 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1208 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1209 
1210 	/* Enable link if disabled in NVM */
1211 	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1212 		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1213 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1214 		IXGBE_WRITE_FLUSH(hw);
1215 	}
1216 
1217 	if (hw->mac.orig_link_settings_stored == FALSE) {
1218 		hw->mac.orig_autoc = autoc;
1219 		hw->mac.orig_autoc2 = autoc2;
1220 		hw->mac.cached_autoc = autoc;
1221 		hw->mac.orig_link_settings_stored = TRUE;
1222 	} else {
1223 		if (autoc != hw->mac.orig_autoc) {
1224 			/* Need SW/FW semaphore around AUTOC writes if LESM is
1225 			 * on, likewise reset_pipeline requires us to hold
1226 			 * this lock as it also writes to AUTOC.
1227 			 */
1228 			bool got_lock = FALSE;
1229 			if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1230 				status = hw->mac.ops.acquire_swfw_sync(hw,
1231 							IXGBE_GSSR_MAC_CSR_SM);
1232 				if (status != IXGBE_SUCCESS) {
1233 					status = IXGBE_ERR_SWFW_SYNC;
1234 					goto reset_hw_out;
1235 				}
1236 
1237 				got_lock = TRUE;
1238 			}
1239 
1240 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1241 			hw->mac.cached_autoc = hw->mac.orig_autoc;
1242 			ixgbe_reset_pipeline_82599(hw);
1243 
1244 			if (got_lock)
1245 				hw->mac.ops.release_swfw_sync(hw,
1246 						      IXGBE_GSSR_MAC_CSR_SM);
1247 		}
1248 
1249 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1250 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1251 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1252 			autoc2 |= (hw->mac.orig_autoc2 &
1253 				   IXGBE_AUTOC2_UPPER_MASK);
1254 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1255 		}
1256 	}
1257 
1258 	/* Store the permanent mac address */
1259 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1260 
1261 	/*
1262 	 * Store MAC address from RAR0, clear receive address registers, and
1263 	 * clear the multicast table.  Also reset num_rar_entries to 128,
1264 	 * since we modify this value when programming the SAN MAC address.
1265 	 */
1266 	hw->mac.num_rar_entries = 128;
1267 	hw->mac.ops.init_rx_addrs(hw);
1268 
1269 	/* Store the permanent SAN mac address */
1270 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1271 
1272 	/* Add the SAN MAC address to the RAR only if it's a valid address */
1273 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1274 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1275 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
1276 
1277 		/* Save the SAN MAC RAR index */
1278 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1279 
1280 		/* Reserve the last RAR for the SAN MAC address */
1281 		hw->mac.num_rar_entries--;
1282 	}
1283 
1284 	/* Store the alternative WWNN/WWPN prefix */
1285 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1286 				   &hw->mac.wwpn_prefix);
1287 
1288 reset_hw_out:
1289 	return status;
1290 }
1291 
1292 /**
1293  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1294  *  @hw: pointer to hardware structure
1295  **/
1296 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1297 {
1298 	int i;
1299 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1300 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1301 
1302 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1303 
1304 	/*
1305 	 * Before starting reinitialization process,
1306 	 * FDIRCMD.CMD must be zero.
1307 	 */
1308 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1309 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1310 		      IXGBE_FDIRCMD_CMD_MASK))
1311 			break;
1312 		usec_delay(10);
1313 	}
1314 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1315 		DEBUGOUT("Flow Director previous command isn't complete, "
1316 			 "aborting table re-initialization.\n");
1317 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1318 	}
1319 
1320 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1321 	IXGBE_WRITE_FLUSH(hw);
1322 	/*
1323 	 * The 82599 adapter's flow director init flow cannot be restarted.
1324 	 * Work around the 82599 silicon errata by performing the following
1325 	 * steps before re-writing the FDIRCTRL register with the same value:
1326 	 * - write 1 to bit 8 of the FDIRCMD register, then
1327 	 * - write 0 to bit 8 of the FDIRCMD register
1328 	 */
1329 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1330 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1331 			 IXGBE_FDIRCMD_CLEARHT));
1332 	IXGBE_WRITE_FLUSH(hw);
1333 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1334 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1335 			 ~IXGBE_FDIRCMD_CLEARHT));
1336 	IXGBE_WRITE_FLUSH(hw);
1337 	/*
1338 	 * Clear FDIR Hash register to clear any leftover hashes
1339 	 * waiting to be programmed.
1340 	 */
1341 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1342 	IXGBE_WRITE_FLUSH(hw);
1343 
1344 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1345 	IXGBE_WRITE_FLUSH(hw);
1346 
1347 	/* Poll init-done after we write FDIRCTRL register */
1348 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1349 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1350 				   IXGBE_FDIRCTRL_INIT_DONE)
1351 			break;
1352 		msec_delay(1);
1353 	}
1354 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1355 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1356 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1357 	}
1358 
1359 	/* Clear FDIR statistics registers (read to clear) */
1360 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1361 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1362 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1363 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1364 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1365 
1366 	return IXGBE_SUCCESS;
1367 }
1368 
1369 /**
1370  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1371  *  @hw: pointer to hardware structure
1372  *  @fdirctrl: value to write to flow director control register
1373  **/
1374 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1375 {
1376 	int i;
1377 
1378 	DEBUGFUNC("ixgbe_fdir_enable_82599");
1379 
1380 	/* Prime the keys for hashing */
1381 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1382 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1383 
1384 	/*
1385 	 * Poll init-done after we write the register.  Estimated times:
1386 	 *      10G: PBALLOC = 11b, timing is 60us
1387 	 *       1G: PBALLOC = 11b, timing is 600us
1388 	 *     100M: PBALLOC = 11b, timing is 6ms
1389 	 *
1390 	 *     Multiply these timings by 4 if under full Rx load
1391 	 *
1392 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1393 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1394 	 * this might not finish in our poll time, but we can live with that
1395 	 * for now.
1396 	 */
1397 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1398 	IXGBE_WRITE_FLUSH(hw);
1399 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1400 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1401 				   IXGBE_FDIRCTRL_INIT_DONE)
1402 			break;
1403 		msec_delay(1);
1404 	}
1405 
1406 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1407 		DEBUGOUT("Flow Director poll time exceeded!\n");
1408 }
1409 
1410 /**
1411  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1412  *  @hw: pointer to hardware structure
1413  *  @fdirctrl: value to write to flow director control register, initially
1414  *	     contains just the value of the Rx packet buffer allocation
1415  **/
1416 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1417 {
1418 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1419 
1420 	/*
1421 	 * Continue setup of fdirctrl register bits:
1422 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1423 	 *  Set the maximum length per hash bucket to 0xA filters
1424 	 *  Send interrupt when 64 (0x4 * 16) filters are left
1425 	 */
1426 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1427 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1428 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1429 
1430 	/* write hashes and fdirctrl register, poll for completion */
1431 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1432 
1433 	return IXGBE_SUCCESS;
1434 }
1435 
1436 /**
1437  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1438  *  @hw: pointer to hardware structure
1439  *  @fdirctrl: value to write to flow director control register, initially
1440  *	     contains just the value of the Rx packet buffer allocation
1441  **/
1442 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1443 {
1444 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1445 
1446 	/*
1447 	 * Continue setup of fdirctrl register bits:
1448 	 *  Turn perfect match filtering on
1449 	 *  Report hash in RSS field of Rx wb descriptor
1450 	 *  Initialize the drop queue
1451 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1452 	 *  Set the maximum length per hash bucket to 0xA filters
1453 	 *  Send interrupt when 64 (0x4 * 16) filters are left
1454 	 */
1455 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1456 		    IXGBE_FDIRCTRL_REPORT_STATUS |
1457 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1458 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1459 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1460 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1461 
1462 	/* write hashes and fdirctrl register, poll for completion */
1463 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1464 
1465 	return IXGBE_SUCCESS;
1466 }
1467 
1468 /*
1469  * These defines allow us to quickly generate all of the necessary instructions
1470  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1471  * for values 0 through 15
1472  */
1473 #define IXGBE_ATR_COMMON_HASH_KEY \
1474 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1475 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1476 do { \
1477 	u32 n = (_n); \
1478 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1479 		common_hash ^= lo_hash_dword >> n; \
1480 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1481 		bucket_hash ^= lo_hash_dword >> n; \
1482 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1483 		sig_hash ^= lo_hash_dword << (16 - n); \
1484 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1485 		common_hash ^= hi_hash_dword >> n; \
1486 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1487 		bucket_hash ^= hi_hash_dword >> n; \
1488 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1489 		sig_hash ^= hi_hash_dword << (16 - n); \
1490 } while (0);
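/*
 * For each n, key bit n selects where the lo_hash_dword contribution goes
 * and key bit n + 16 selects where the hi_hash_dword contribution goes:
 * a bit set in both hash keys folds into common_hash, a bit set only in
 * the bucket key into bucket_hash, and a bit set only in the signature
 * key into sig_hash.  common_hash is XORed into both results afterwards.
 */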
1491 
1492 /**
1493  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1494  *  @input: unique input dword
 *  @common: compressed common input dword
1495  *
1496  *  This function computes the ATR signature hash and contains
1497  *  several optimizations such as unwinding all of the loops, letting the
1498  *  compiler work out all of the conditional ifs since the keys are static
1499  *  defines, and computing two hashes at once since the hashed dword stream
1500  *  will be the same for both keys.
1501  **/
1502 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1503 				     union ixgbe_atr_hash_dword common)
1504 {
1505 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1506 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1507 
1508 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1509 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
1510 
1511 	/* generate common hash dword */
1512 	hi_hash_dword = IXGBE_NTOHL(common.dword);
1513 
1514 	/* low dword is word swapped version of common */
1515 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1516 
1517 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1518 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1519 
1520 	/* Process bits 0 and 16 */
1521 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1522 
1523 	/*
1524 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1525 	 * delay this because bit 0 of the stream should not be processed
1526 	 * so we do not add the vlan until after bit 0 was processed
1527 	 */
1528 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1529 
1530 	/* Process the remaining 30 bits of the key */
1531 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1532 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1533 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1534 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1535 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1536 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1537 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1538 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1539 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1540 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1541 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1542 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1543 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1544 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1545 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1546 
1547 	/* combine common_hash result with signature and bucket hashes */
1548 	bucket_hash ^= common_hash;
1549 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1550 
1551 	sig_hash ^= common_hash << 16;
1552 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1553 
1554 	/* return completed signature hash */
1555 	return sig_hash ^ bucket_hash;
1556 }
1557 
1558 /**
1559  *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1560  *  @hw: pointer to hardware structure
1561  *  @input: unique input dword
1562  *  @common: compressed common input dword
1563  *  @queue: queue index to direct traffic to
1564  **/
1565 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1566 					  union ixgbe_atr_hash_dword input,
1567 					  union ixgbe_atr_hash_dword common,
1568 					  u8 queue)
1569 {
1570 	u64  fdirhashcmd;
1571 	u32  fdircmd;
1572 
1573 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1574 
1575 	/*
1576 	 * Get the flow_type in order to program FDIRCMD properly
1577 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1578 	 */
1579 	switch (input.formatted.flow_type) {
1580 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1581 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1582 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1583 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1584 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1585 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1586 		break;
1587 	default:
1588 		DEBUGOUT(" Error on flow type input\n");
1589 		return IXGBE_ERR_CONFIG;
1590 	}
1591 
1592 	/* configure FDIRCMD register */
1593 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1594 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1595 	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1596 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1597 
1598 	/*
1599 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1600 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1601 	 */
1602 	fdirhashcmd = (u64)fdircmd << 32;
1603 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1604 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1605 
1606 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1607 
1608 	return IXGBE_SUCCESS;
1609 }
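/*
 * Typical usage (a hedged, illustrative sketch; the real packet parsing
 * lives in the driver's ATR transmit path, and the local variables below
 * are hypothetical):
 *
 *	union ixgbe_atr_hash_dword input = { .dword = 0 };
 *	union ixgbe_atr_hash_dword common = { .dword = 0 };
 *
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	input.formatted.vlan_id = vlan_tag;
 *	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
 *	common.port.src ^= th->th_sport;
 *	common.port.dst ^= th->th_dport;
 *
 *	ixgbe_fdir_add_signature_filter_82599(hw, input, common, queue);
 */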
1610 
1611 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1612 do { \
1613 	u32 n = (_n); \
1614 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1615 		bucket_hash ^= lo_hash_dword >> n; \
1616 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1617 		bucket_hash ^= hi_hash_dword >> n; \
1618 } while (0);
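/* Bucket-only variant of IXGBE_COMPUTE_SIG_HASH_ITERATION above. */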
1619 
1620 /**
1621  *  @input: input bitstream to compute the hash on
1622  *  @atr_input: input bitstream to compute the hash on
1623  *  @input_mask: mask for the input bitstream
1624  *
1625  *  This function serves two main purposes.  First it applies the input_mask
1626  *  to the input, resulting in a cleaned up input data stream.
1627  *  Secondly it computes the hash and stores it in the bkt_hash field at
1628  *  the end of the input byte stream.  This way it will be available for
1629  *  future use without needing to recompute the hash.
1630  **/
1631 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1632 					  union ixgbe_atr_input *input_mask)
1633 {
1634 
1635 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1636 	u32 bucket_hash = 0;
1637 
1638 	/* Apply masks to input data */
1639 	input->dword_stream[0]  &= input_mask->dword_stream[0];
1640 	input->dword_stream[1]  &= input_mask->dword_stream[1];
1641 	input->dword_stream[2]  &= input_mask->dword_stream[2];
1642 	input->dword_stream[3]  &= input_mask->dword_stream[3];
1643 	input->dword_stream[4]  &= input_mask->dword_stream[4];
1644 	input->dword_stream[5]  &= input_mask->dword_stream[5];
1645 	input->dword_stream[6]  &= input_mask->dword_stream[6];
1646 	input->dword_stream[7]  &= input_mask->dword_stream[7];
1647 	input->dword_stream[8]  &= input_mask->dword_stream[8];
1648 	input->dword_stream[9]  &= input_mask->dword_stream[9];
1649 	input->dword_stream[10] &= input_mask->dword_stream[10];
1650 
1651 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1652 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1653 
1654 	/* generate common hash dword */
1655 	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1656 				    input->dword_stream[2] ^
1657 				    input->dword_stream[3] ^
1658 				    input->dword_stream[4] ^
1659 				    input->dword_stream[5] ^
1660 				    input->dword_stream[6] ^
1661 				    input->dword_stream[7] ^
1662 				    input->dword_stream[8] ^
1663 				    input->dword_stream[9] ^
1664 				    input->dword_stream[10]);
1665 
1666 	/* low dword is word swapped version of common */
1667 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1668 
1669 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1670 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1671 
1672 	/* Process bits 0 and 16 */
1673 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1674 
1675 	/*
1676 	 * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword.  This is
1677 	 * delayed because bit 0 of the stream must not pick up the VLAN bits, so
1678 	 * they are only folded in after bit 0 has been processed
1679 	 */
1680 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1681 
1682 	/* Process the remaining 30 bits of the key */
1683 	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1684 	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1685 	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1686 	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1687 	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1688 	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1689 	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1690 	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1691 	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1692 	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1693 	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1694 	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1695 	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1696 	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1697 	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1698 
1699 	/*
1700 	 * Limit hash to 13 bits since max bucket count is 8K.
1701 	 * Store result at the end of the input stream.
1702 	 */
1703 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1704 }
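
/*
 * The bkt_hash stored above is consumed unchanged by
 * ixgbe_fdir_write_perfect_filter_82599() when it programs FDIRHASH, so
 * callers do not need to recompute it for each filter write.
 */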
1705 
1706 /**
1707  *  ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
1708  *  @input_mask: mask to be bit swapped
1709  *
1710  *  The source and destination port masks for flow director are bit swapped
1711  *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
1712  *  in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
1713  *  and so on.  In order to generate a correctly swapped value we need to bit
1714  *  swap the mask, which is what this function accomplishes.
1715 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1716 {
1717 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1718 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1719 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1720 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1721 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1722 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1723 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1724 }
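
/*
 * Worked example (illustrative): the swap reverses the 16 bits of each
 * port mask within its own half of the register, so a host-order port
 * mask of 0x0001 is written as 0x8000 while a mask of 0xFFFF is
 * unchanged.  The destination port mask occupies the upper half after
 * the IXGBE_FDIRTCPM_DPORTM_SHIFT applied above.
 */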
1725 
1726 /*
1727  * These macros are meant to address the fact that we have registers
1728  * that are either all or in part big-endian.  As a result, on big-endian
1729  * systems the value ends up byte swapped to little-endian before it is
1730  * byte swapped again and written to the hardware in the original
1731  * big-endian format.
1732  */
1733 #define IXGBE_STORE_AS_BE32(_value) \
1734 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1735 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1736 
1737 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1738 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1739 
1740 #define IXGBE_STORE_AS_BE16(_value) \
1741 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1742 
1743 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1744 				    union ixgbe_atr_input *input_mask)
1745 {
1746 	/* mask IPv6 since it is currently not supported */
1747 	u32 fdirm = IXGBE_FDIRM_DIPv6;
1748 	u32 fdirtcpm;
1749 
1750 	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
1751 
1752 	/*
1753 	 * Program the relevant mask registers.  If a src/dst_port or src/dst_addr
1754 	 * mask is zero, the field is treated as fully masked.  Also assume that
1755 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1756 	 * cannot be masked out in this implementation.
1757 	 *
1758 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1759 	 * point in time.
1760 	 */
1761 
1762 	/* verify the bucket hash mask is cleared; the hash is always generated */
1763 	if (input_mask->formatted.bkt_hash)
1764 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
1765 
1766 	/* Program FDIRM and verify partial masks */
1767 	switch (input_mask->formatted.vm_pool & 0x7F) {
1768 	case 0x0:
1769 		fdirm |= IXGBE_FDIRM_POOL;
1770 	case 0x7F:
1771 		break;
1772 	default:
1773 		DEBUGOUT(" Error on vm pool mask\n");
1774 		return IXGBE_ERR_CONFIG;
1775 	}
1776 
1777 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1778 	case 0x0:
1779 		fdirm |= IXGBE_FDIRM_L4P;
1780 		if (input_mask->formatted.dst_port ||
1781 		    input_mask->formatted.src_port) {
1782 			DEBUGOUT(" Error on src/dst port mask\n");
1783 			return IXGBE_ERR_CONFIG;
1784 		}
1785 	case IXGBE_ATR_L4TYPE_MASK:
1786 		break;
1787 	default:
1788 		DEBUGOUT(" Error on flow type mask\n");
1789 		return IXGBE_ERR_CONFIG;
1790 	}
1791 
1792 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1793 	case 0x0000:
1794 		/* mask VLAN ID, fall through to mask VLAN priority */
1795 		fdirm |= IXGBE_FDIRM_VLANID;
1796 	case 0x0FFF:
1797 		/* mask VLAN priority */
1798 		fdirm |= IXGBE_FDIRM_VLANP;
1799 		break;
1800 	case 0xE000:
1801 		/* mask VLAN ID only, fall through */
1802 		fdirm |= IXGBE_FDIRM_VLANID;
1803 	case 0xEFFF:
1804 		/* no VLAN fields masked */
1805 		break;
1806 	default:
1807 		DEBUGOUT(" Error on VLAN mask\n");
1808 		return IXGBE_ERR_CONFIG;
1809 	}
1810 
1811 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1812 	case 0x0000:
1813 		/* Mask Flex Bytes, fall through */
1814 		fdirm |= IXGBE_FDIRM_FLEX;
1815 	case 0xFFFF:
1816 		break;
1817 	default:
1818 		DEBUGOUT(" Error on flexible byte mask\n");
1819 		return IXGBE_ERR_CONFIG;
1820 	}
1821 
1822 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1823 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1824 
1825 	/* store the TCP/UDP port masks, bit reversed from port layout */
1826 	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1827 
1828 	/* write both the same so that UDP and TCP use the same mask */
1829 	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1830 	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1831 
1832 	/* store source and destination IP masks (big-endian) */
1833 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1834 			     ~input_mask->formatted.src_ip[0]);
1835 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1836 			     ~input_mask->formatted.dst_ip[0]);
1837 
1838 	return IXGBE_SUCCESS;
1839 }
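
/*
 * Note that FDIRM, FDIRTCPM, FDIRUDPM and the IPv4 mask registers written
 * above are global to the flow director, so all perfect filters on the
 * device share the input mask most recently programmed here.
 */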
1840 
1841 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1842 					  union ixgbe_atr_input *input,
1843 					  u16 soft_id, u8 queue)
1844 {
1845 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
1846 
1847 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1848 
1849 	/* currently IPv6 is not supported, must be programmed with 0 */
1850 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1851 			     input->formatted.src_ip[0]);
1852 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1853 			     input->formatted.src_ip[1]);
1854 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1855 			     input->formatted.src_ip[2]);
1856 
1857 	/* record the source address (big-endian) */
1858 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1859 
1860 	/* record the first 32 bits of the destination address (big-endian) */
1861 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1862 
1863 	/* record source and destination port (little-endian) */
1864 	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1865 	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1866 	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1867 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1868 
1869 	/* record VLAN (little-endian) and flex_bytes (big-endian) */
1870 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1871 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1872 	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1873 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1874 
1875 	/* configure FDIRHASH register */
1876 	fdirhash = input->formatted.bkt_hash;
1877 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1878 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1879 
1880 	/*
1881 	 * flush all previous writes to make certain registers are
1882 	 * programmed prior to issuing the command
1883 	 */
1884 	IXGBE_WRITE_FLUSH(hw);
1885 
1886 	/* configure FDIRCMD register */
1887 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1888 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1889 	if (queue == IXGBE_FDIR_DROP_QUEUE)
1890 		fdircmd |= IXGBE_FDIRCMD_DROP;
1891 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1892 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1893 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1894 
1895 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1896 
1897 	return IXGBE_SUCCESS;
1898 }
1899 
1900 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1901 					  union ixgbe_atr_input *input,
1902 					  u16 soft_id)
1903 {
1904 	u32 fdirhash;
1905 	u32 fdircmd = 0;
1906 	u32 retry_count;
1907 	s32 err = IXGBE_SUCCESS;
1908 
1909 	/* configure FDIRHASH register */
1910 	fdirhash = input->formatted.bkt_hash;
1911 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1912 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1913 
1914 	/* flush hash to HW */
1915 	IXGBE_WRITE_FLUSH(hw);
1916 
1917 	/* Query if filter is present */
1918 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1919 
1920 	for (retry_count = 10; retry_count; retry_count--) {
1921 		/* allow 10us for query to process */
1922 		usec_delay(10);
1923 		/* verify query completed successfully */
1924 		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1925 		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1926 			break;
1927 	}
1928 
1929 	if (!retry_count)
1930 		err = IXGBE_ERR_FDIR_REINIT_FAILED;
1931 
1932 	/* if filter exists in hardware then remove it */
1933 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1934 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1935 		IXGBE_WRITE_FLUSH(hw);
1936 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1937 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1938 	}
1939 
1940 	return err;
1941 }
1942 
1943 /**
1944  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1945  *  @hw: pointer to hardware structure
1946  *  @input: input bitstream
1947  *  @input_mask: mask for the input bitstream
1948  *  @soft_id: software index for the filters
1949  *  @queue: queue index to direct traffic to
1950  *
1951  *  Note that the caller of this function must hold a lock before calling,
1952  *  since the hardware writes must be protected from one another.
1953  **/
1954 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1955 					union ixgbe_atr_input *input,
1956 					union ixgbe_atr_input *input_mask,
1957 					u16 soft_id, u8 queue)
1958 {
1959 	s32 err = IXGBE_ERR_CONFIG;
1960 
1961 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1962 
1963 	/*
1964 	 * Check flow_type formatting, and bail out before we touch the hardware
1965 	 * if there's a configuration issue
1966 	 */
1967 	switch (input->formatted.flow_type) {
1968 	case IXGBE_ATR_FLOW_TYPE_IPV4:
1969 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1970 		if (input->formatted.dst_port || input->formatted.src_port) {
1971 			DEBUGOUT(" Error on src/dst port\n");
1972 			return IXGBE_ERR_CONFIG;
1973 		}
1974 		break;
1975 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1976 		if (input->formatted.dst_port || input->formatted.src_port) {
1977 			DEBUGOUT(" Error on src/dst port\n");
1978 			return IXGBE_ERR_CONFIG;
1979 		}
1980 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1981 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1982 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1983 						  IXGBE_ATR_L4TYPE_MASK;
1984 		break;
1985 	default:
1986 		DEBUGOUT(" Error on flow type input\n");
1987 		return err;
1988 	}
1989 
1990 	/* program input mask into the HW */
1991 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1992 	if (err)
1993 		return err;
1994 
1995 	/* apply mask and compute/store hash */
1996 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1997 
1998 	/* program filters to filter memory */
1999 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2000 						     soft_id, queue);
2001 }
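
/*
 * Illustrative usage sketch only: a TCPv4 perfect filter that compares the
 * IPv4 addresses and L4 ports while masking everything else could be built
 * as below.  Member names are assumed to follow union ixgbe_atr_input in
 * ixgbe_type.h; the addresses, ports, soft_id and queue are hypothetical.
 * A mask of all ones means the field is compared; zero masks it out.
 *
 *	union ixgbe_atr_input input = { .dword_stream = { 0 } };
 *	union ixgbe_atr_input mask = { .dword_stream = { 0 } };
 *
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	input.formatted.src_ip[0] = htonl(0xC0A80101);	(192.168.1.1)
 *	input.formatted.dst_ip[0] = htonl(0xC0A80102);	(192.168.1.2)
 *	input.formatted.src_port = htons(12345);
 *	input.formatted.dst_port = htons(80);
 *	mask.formatted.src_ip[0] = 0xFFFFFFFF;
 *	mask.formatted.dst_ip[0] = 0xFFFFFFFF;
 *	mask.formatted.src_port = 0xFFFF;
 *	mask.formatted.dst_port = 0xFFFF;
 *	ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask, soft_id, queue);
 */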
2002 
2003 /**
2004  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2005  *  @hw: pointer to hardware structure
2006  *  @reg: analog register to read
2007  *  @val: read value
2008  *
2009  *  Performs a read operation on the specified Omer analog register.
2010  **/
2011 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2012 {
2013 	u32  core_ctl;
2014 
2015 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2016 
2017 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2018 			(reg << 8));
2019 	IXGBE_WRITE_FLUSH(hw);
2020 	usec_delay(10);
2021 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2022 	*val = (u8)core_ctl;
2023 
2024 	return IXGBE_SUCCESS;
2025 }
2026 
2027 /**
2028  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2029  *  @hw: pointer to hardware structure
2030  *  @reg: analog register to write
2031  *  @val: value to write
2032  *
2033  *  Performs a write operation to the specified Omer analog register.
2034  **/
2035 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2036 {
2037 	u32  core_ctl;
2038 
2039 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2040 
2041 	core_ctl = (reg << 8) | val;
2042 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2043 	IXGBE_WRITE_FLUSH(hw);
2044 	usec_delay(10);
2045 
2046 	return IXGBE_SUCCESS;
2047 }
2048 
2049 /**
2050  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2051  *  @hw: pointer to hardware structure
2052  *
2053  *  Starts the hardware using the generic start_hw function
2054  *  and then the gen2 start_hw function.
2055  *  Then performs revision-specific operations, if any.
2056  **/
2057 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2058 {
2059 	s32 ret_val = IXGBE_SUCCESS;
2060 
2061 	DEBUGFUNC("ixgbe_start_hw_82599");
2062 
2063 	ret_val = ixgbe_start_hw_generic(hw);
2064 	if (ret_val != IXGBE_SUCCESS)
2065 		goto out;
2066 
2067 	ret_val = ixgbe_start_hw_gen2(hw);
2068 	if (ret_val != IXGBE_SUCCESS)
2069 		goto out;
2070 
2071 	/* We need to run link autotry after the driver loads */
2072 	hw->mac.autotry_restart = TRUE;
2073 
2074 	if (ret_val == IXGBE_SUCCESS)
2075 		ret_val = ixgbe_verify_fw_version_82599(hw);
2076 out:
2077 	return ret_val;
2078 }
2079 
2080 /**
2081  *  ixgbe_identify_phy_82599 - Get physical layer module
2082  *  @hw: pointer to hardware structure
2083  *
2084  *  Determines the physical layer module found on the current adapter.
2085  *  If PHY already detected, maintains current PHY type in hw struct,
2086  *  otherwise executes the PHY detection routine.
2087  **/
2088 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2089 {
2090 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2091 
2092 	DEBUGFUNC("ixgbe_identify_phy_82599");
2093 
2094 	/* Detect PHY if not unknown - returns success if already detected. */
2095 	status = ixgbe_identify_phy_generic(hw);
2096 	if (status != IXGBE_SUCCESS) {
2097 		/* 82599 10GBASE-T requires an external PHY */
2098 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2099 			goto out;
2100 		else
2101 			status = ixgbe_identify_module_generic(hw);
2102 	}
2103 
2104 	/* Set PHY type none if no PHY detected */
2105 	if (hw->phy.type == ixgbe_phy_unknown) {
2106 		hw->phy.type = ixgbe_phy_none;
2107 		status = IXGBE_SUCCESS;
2108 	}
2109 
2110 	/* Return error if SFP module has been detected but is not supported */
2111 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2112 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2113 
2114 out:
2115 	return status;
2116 }
2117 
2118 /**
2119  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2120  *  @hw: pointer to hardware structure
2121  *
2122  *  Determines physical layer capabilities of the current configuration.
2123  **/
2124 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2125 {
2126 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2127 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2128 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2129 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2130 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2131 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2132 	u16 ext_ability = 0;
2133 	u8 comp_codes_10g = 0;
2134 	u8 comp_codes_1g = 0;
2135 
2136 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2137 
2138 	hw->phy.ops.identify(hw);
2139 
2140 	switch (hw->phy.type) {
2141 	case ixgbe_phy_tn:
2142 	case ixgbe_phy_cu_unknown:
2143 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2144 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2145 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2146 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2147 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2148 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2149 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2150 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2151 		goto out;
2152 	default:
2153 		break;
2154 	}
2155 
2156 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2157 	case IXGBE_AUTOC_LMS_1G_AN:
2158 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2159 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2160 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2161 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2162 			goto out;
2163 		} else
2164 			/* SFI mode so read SFP module */
2165 			goto sfp_check;
2166 		break;
2167 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2168 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2169 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2170 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2171 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2172 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2173 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2174 		goto out;
2175 		break;
2176 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2177 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2178 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2179 			goto out;
2180 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2181 			goto sfp_check;
2182 		break;
2183 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2184 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2185 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2186 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2187 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2188 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2189 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2190 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2191 		goto out;
2192 		break;
2193 	default:
2194 		goto out;
2195 		break;
2196 	}
2197 
2198 sfp_check:
2199 	/* SFP check must be done last since DA modules are sometimes used to
2200 	 * test KR mode - we need to identify KR mode correctly before the SFP
2201 	 * check.  Call identify_sfp because the pluggable module may have changed */
2202 	hw->phy.ops.identify_sfp(hw);
2203 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2204 		goto out;
2205 
2206 	switch (hw->phy.type) {
2207 	case ixgbe_phy_sfp_passive_tyco:
2208 	case ixgbe_phy_sfp_passive_unknown:
2209 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2210 		break;
2211 	case ixgbe_phy_sfp_ftl_active:
2212 	case ixgbe_phy_sfp_active_unknown:
2213 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2214 		break;
2215 	case ixgbe_phy_sfp_avago:
2216 	case ixgbe_phy_sfp_ftl:
2217 	case ixgbe_phy_sfp_intel:
2218 	case ixgbe_phy_sfp_unknown:
2219 		hw->phy.ops.read_i2c_eeprom(hw,
2220 		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2221 		hw->phy.ops.read_i2c_eeprom(hw,
2222 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2223 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2224 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2225 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2226 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2227 		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2228 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2229 		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2230 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2231 		break;
2232 	default:
2233 		break;
2234 	}
2235 
2236 out:
2237 	return physical_layer;
2238 }
2239 
2240 /**
2241  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2242  *  @hw: pointer to hardware structure
2243  *  @regval: register value to write to RXCTRL
2244  *
2245  *  Enables the Rx DMA unit for 82599
2246  **/
2247 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2248 {
2249 
2250 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2251 
2252 	/*
2253 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2254 	 * If traffic is incoming before we enable the Rx unit, it could hang
2255 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2256 	 * completely disabled prior to enabling the Rx unit.
2257 	 */
2258 
2259 	hw->mac.ops.disable_sec_rx_path(hw);
2260 
2261 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2262 
2263 	hw->mac.ops.enable_sec_rx_path(hw);
2264 
2265 	return IXGBE_SUCCESS;
2266 }
2267 
2268 /**
2269  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
2270  *  @hw: pointer to hardware structure
2271  *
2272  *  Verifies that the installed firmware version is 0.6 or higher
2273  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2274  *
2275  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2276  *  if the FW version is not supported.
2277  **/
2278 s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2279 {
2280 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2281 	u16 fw_offset, fw_ptp_cfg_offset;
2282 	u16 fw_version = 0;
2283 
2284 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2285 
2286 	/* firmware check is only necessary for SFI devices */
2287 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2288 		status = IXGBE_SUCCESS;
2289 		goto fw_version_out;
2290 	}
2291 
2292 	/* get the offset to the Firmware Module block */
2293 	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2294 
2295 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2296 		goto fw_version_out;
2297 
2298 	/* get the offset to the Pass Through Patch Configuration block */
2299 	hw->eeprom.ops.read(hw, (fw_offset +
2300 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2301 				 &fw_ptp_cfg_offset);
2302 
2303 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2304 		goto fw_version_out;
2305 
2306 	/* get the firmware version */
2307 	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2308 			    IXGBE_FW_PATCH_VERSION_4), &fw_version);
2309 
2310 	if (fw_version > 0x5)
2311 		status = IXGBE_SUCCESS;
2312 
2313 fw_version_out:
2314 	return status;
2315 }
2316 
2317 /**
2318  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2319  *  @hw: pointer to hardware structure
2320  *
2321  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
2322  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2323  **/
2324 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2325 {
2326 	bool lesm_enabled = FALSE;
2327 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2328 	s32 status;
2329 
2330 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2331 
2332 	/* get the offset to the Firmware Module block */
2333 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2334 
2335 	if ((status != IXGBE_SUCCESS) ||
2336 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
2337 		goto out;
2338 
2339 	/* get the offset to the LESM Parameters block */
2340 	status = hw->eeprom.ops.read(hw, (fw_offset +
2341 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2342 				     &fw_lesm_param_offset);
2343 
2344 	if ((status != IXGBE_SUCCESS) ||
2345 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2346 		goto out;
2347 
2348 	/* get the lesm state word */
2349 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2350 				     IXGBE_FW_LESM_STATE_1),
2351 				     &fw_lesm_state);
2352 
2353 	if ((status == IXGBE_SUCCESS) &&
2354 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2355 		lesm_enabled = TRUE;
2356 
2357 out:
2358 	return lesm_enabled;
2359 }
2360 
2361 /**
2362  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2363  *  fastest available method
2364  *
2365  *  @hw: pointer to hardware structure
2366  *  @offset: offset of word in EEPROM to read
2367  *  @words: number of words
2368  *  @data: word(s) read from the EEPROM
2369  *
2370  *  Retrieves 16 bit word(s) read from EEPROM
2371  **/
2372 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2373 					  u16 words, u16 *data)
2374 {
2375 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2376 	s32 ret_val = IXGBE_ERR_CONFIG;
2377 
2378 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2379 
2380 	/*
2381 	 * If EEPROM is detected and can be addressed using 14 bits,
2382 	 * use EERD otherwise use bit bang
2383 	 */
2384 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2385 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2386 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2387 							 data);
2388 	else
2389 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2390 								    words,
2391 								    data);
2392 
2393 	return ret_val;
2394 }
2395 
2396 /**
2397  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
2398  *  fastest available method
2399  *
2400  *  @hw: pointer to hardware structure
2401  *  @offset: offset of word in the EEPROM to read
2402  *  @data: word read from the EEPROM
2403  *
2404  *  Reads a 16 bit word from the EEPROM
2405  **/
2406 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2407 				   u16 offset, u16 *data)
2408 {
2409 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2410 	s32 ret_val = IXGBE_ERR_CONFIG;
2411 
2412 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2413 
2414 	/*
2415 	 * If EEPROM is detected and can be addressed using 14 bits,
2416 	 * use EERD otherwise use bit bang
2417 	 */
2418 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2419 	    (offset <= IXGBE_EERD_MAX_ADDR))
2420 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2421 	else
2422 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2423 
2424 	return ret_val;
2425 }
2426 
2427 /**
2428  * ixgbe_reset_pipeline_82599 - perform pipeline reset
2429  *
2430  *  @hw: pointer to hardware structure
2431  *
2432  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2433  * full pipeline reset
2434  **/
2435 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2436 {
2437 	s32 ret_val;
2438 	u32 anlp1_reg = 0;
2439 	u32 i, autoc_reg, autoc2_reg;
2440 
2441 	/* Enable link if disabled in NVM */
2442 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2443 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2444 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2445 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2446 		IXGBE_WRITE_FLUSH(hw);
2447 	}
2448 
2449 	autoc_reg = hw->mac.cached_autoc;
2450 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2451 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2452 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
2453 	/* Wait for AN to leave state 0 */
2454 	for (i = 0; i < 10; i++) {
2455 		msec_delay(4);
2456 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2457 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2458 			break;
2459 	}
2460 
2461 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2462 		DEBUGOUT("auto negotiation not completed\n");
2463 		ret_val = IXGBE_ERR_RESET_FAILED;
2464 		goto reset_pipeline_out;
2465 	}
2466 
2467 	ret_val = IXGBE_SUCCESS;
2468 
2469 reset_pipeline_out:
2470 	/* Write AUTOC register with original LMS field and Restart_AN */
2471 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2472 	IXGBE_WRITE_FLUSH(hw);
2473 
2474 	return ret_val;
2475 }
2476 
2477 
2478 
2479