xref: /freebsd/sys/dev/ixgbe/ixgbe_82599.c (revision ec0e626bafb335b30c499d06066997f54b10c092)
/******************************************************************************

  Copyright (c) 2001-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
#define IXGBE_82599_RAR_ENTRIES   128
#define IXGBE_82599_MC_TBL_SIZE   128
#define IXGBE_82599_VFT_TBL_SIZE  128
#define IXGBE_82599_RX_PB_SIZE	  512

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
					u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
					u8 dev_addr, u8 data);

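/**
 *  ixgbe_init_mac_link_ops_82599 - Init MAC link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Sets up the Tx laser control and setup_link function pointers based on
 *  the media type, SFP type, and SmartSpeed/LESM configuration.
 **/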
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
				       ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
					ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
					       ixgbe_set_hard_rate_select_speed;
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
					       ixgbe_set_soft_rate_select_speed;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}
105 
106 /**
107  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
108  *  @hw: pointer to hardware structure
109  *
110  *  Initialize any function pointers that were not able to be
111  *  set during init_shared_code because the PHY/SFP type was
112  *  not known.  Perform the SFP init if necessary.
113  *
114  **/
115 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
116 {
117 	struct ixgbe_mac_info *mac = &hw->mac;
118 	struct ixgbe_phy_info *phy = &hw->phy;
119 	s32 ret_val = IXGBE_SUCCESS;
120 	u32 esdp;
121 
122 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
123 
124 	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
125 		/* Store flag indicating I2C bus access control unit. */
126 		hw->phy.qsfp_shared_i2c_bus = TRUE;
127 
128 		/* Initialize access to QSFP+ I2C bus */
129 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
130 		esdp |= IXGBE_ESDP_SDP0_DIR;
131 		esdp &= ~IXGBE_ESDP_SDP1_DIR;
132 		esdp &= ~IXGBE_ESDP_SDP0;
133 		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
134 		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
135 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
136 		IXGBE_WRITE_FLUSH(hw);
137 
138 		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
139 		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
140 	}
141 	/* Identify the PHY or SFP module */
142 	ret_val = phy->ops.identify(hw);
143 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
144 		goto init_phy_ops_out;
145 
146 	/* Setup function pointers based on detected SFP module and speeds */
147 	ixgbe_init_mac_link_ops_82599(hw);
148 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
149 		hw->phy.ops.reset = NULL;
150 
151 	/* If copper media, overwrite with copper function pointers */
152 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
153 		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
154 		mac->ops.get_link_capabilities =
155 				  ixgbe_get_copper_link_capabilities_generic;
156 	}
157 
158 	/* Set necessary function pointers based on PHY type */
159 	switch (hw->phy.type) {
160 	case ixgbe_phy_tn:
161 		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
162 		phy->ops.check_link = ixgbe_check_phy_link_tnx;
163 		phy->ops.get_firmware_version =
164 			     ixgbe_get_phy_firmware_version_tnx;
165 		break;
166 	default:
167 		break;
168 	}
169 init_phy_ops_out:
170 	return ret_val;
171 }
172 
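/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Runs the EEPROM-provided PHY init sequence for the detected SFP module
 *  and then restarts the DSP in SFI mode.
 **/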
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining the semaphore again to allow FW access;
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}
	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining the semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}

/**
 *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 *  @hw: pointer to hardware structure
 *  @locked: Returns TRUE if the SW/FW semaphore was taken for this read
 *  @reg_val: Value we read from AUTOC
 *
 *  For this part (82599) we need to wrap read-modify-writes with a possible
 *  FW/SW lock.  It is assumed this lock will be freed with the next
 *  prot_autoc_write_82599().
 */
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	s32 ret_val;

	*locked = FALSE;
	/* If LESM is on then we need to hold the SW/FW semaphore. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		*locked = TRUE;
	}

	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by a
 *          previous prot_autoc_read_82599() call.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC.  Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}

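/*
 * Illustrative read-modify-write sequence for AUTOC (a sketch of how the two
 * functions above are meant to pair up, not additional shared-code API):
 *
 *	bool locked = FALSE;
 *	u32 autoc;
 *
 *	if (hw->mac.ops.prot_autoc_read(hw, &locked, &autoc) == IXGBE_SUCCESS) {
 *		autoc &= ~IXGBE_AUTOC_LMS_MASK;
 *		autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
 *		hw->mac.ops.prot_autoc_write(hw, autoc, locked);
 *	}
 *
 * prot_autoc_write_82599() releases the semaphore whether it was taken by
 * the read or acquired in the write itself, so the caller never releases it.
 */
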
/**
 *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82599.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: TRUE when autoneg or autotry is enabled
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If the AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation;
		 * limited autoneg is enabled at 1G.
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}

/**
 *  ixgbe_get_media_type_82599 - Get media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		media_type = ixgbe_media_type_fiber_qsfp;
		break;
	case IXGBE_DEV_ID_82599_BYPASS:
		media_type = ixgbe_media_type_fiber_fixed;
		hw->phy.multispeed_fiber = TRUE;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 *  @hw: pointer to hardware structure
 *
 *  Disables link during D3 power down sequence.
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

out:
	return status;
}

/**
 *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively shutting down the Tx
 *  laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively turning on the Tx
 *  laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 *  @hw: pointer to hardware structure
 *
 *  When the driver changes the link speeds that it can support,
 *  it sets autotry_restart to TRUE to indicate that we need to
 *  initiate a new autotry session with the link partner.  To do
 *  so, we set the speed then disable and re-enable the Tx laser, to
 *  alert the link partner that it also needs to restart autotry on its
 *  end.  This is consistent with true Clause 37 autoneg, which also
 *  involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 *  ixgbe_set_hard_rate_select_speed - Set module link speed
 *  @hw: pointer to hardware structure
 *  @speed: link speed to set
 *
 *  Set module link speed via RS0/RS1 rate select pins.
 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
					ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 *  ixgbe_setup_mac_link_82599 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* AUTOC at entry */
	u32 orig_autoc = 0; /* cached value of the AUTOC register */
	u32 autoc = current_autoc; /* working copy used for comparison */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /* Just in case autoneg time = 0 */
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82599 - Perform hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, performs a PHY reset, and performs a link
 *  (MAC) reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear, meaning the reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support, we need to
		 * leave LMS in the state it was in before the MAC reset.
		 * Likewise if we support WoL we don't want to change
		 * the LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return IXGBE_SUCCESS;
		usec_delay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}

/**
 *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 *  @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;

	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting the reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * The 82599 adapter's Flow Director init flow cannot be restarted.
	 * Work around this 82599 silicon errata by performing the following
	 * steps before re-writing the FDIRCTRL control register with the
	 * same value:
	 * - write 1 to bit 8 of the FDIRCMD register, then
	 * - write 0 to bit 8 of the FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear the FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write the FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 *  @cloud_mode: TRUE - cloud mode, FALSE - other mode
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
			bool cloud_mode)
{
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Report hash in RSS field of Rx wb descriptor
	 *  Initialize the drop queue
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	if (cloud_mode)
		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)

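/*
 * As an illustration (not additional code), IXGBE_COMPUTE_SIG_HASH_ITERATION(3)
 * expands to roughly the following, with the key tests folded away at compile
 * time because the keys are constants:
 *
 *	if bit 3 is set in both keys:       common_hash ^= lo_hash_dword >> 3;
 *	else if only in the bucket key:     bucket_hash ^= lo_hash_dword >> 3;
 *	else if only in the signature key:  sig_hash ^= lo_hash_dword << 13;
 *
 * plus the same three tests for bit 19 (n + 16) applied to hi_hash_dword.
 */
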
/**
 *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 *  @input: unique input dword
 *  @common: compressed common input dword
 *
 *  This function contains several optimizations, such as unwinding all of
 *  the loops, letting the compiler work out all of the conditional ifs since
 *  the keys are static defines, and computing two hashes at once since the
 *  hashed dword stream will be the same for both keys.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this had
	 * to be delayed because bit 0 of the stream should not be processed,
	 * so we do not add the VLAN until after bit 0 has been processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @common: compressed common input dword
 *  @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;
	s32 err;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly: the
	 * lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32 bits of fdirhashcmd are for FDIRHASH; the upper
	 * 32 bits are for FDIRCMD.  Then do a 64-bit register write from
	 * FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return IXGBE_SUCCESS;
}

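/*
 * Illustrative caller sketch (hypothetical values; real callers derive the
 * dwords from a packet's headers and also fold the IP addresses into the
 * common dword, using the union ixgbe_atr_hash_dword layout from
 * ixgbe_type.h):
 *
 *	union ixgbe_atr_hash_dword input, common;
 *
 *	input.dword = 0;
 *	common.dword = 0;
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	common.port.src = src_port;	(XORed with the addresses in practice)
 *	common.port.dst = dst_port;
 *	ixgbe_fdir_add_signature_filter_82599(hw, input, common, rx_queue);
 */
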
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)

/**
 *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 *  @input: input bitstream to compute the hash on
 *  @input_mask: mask for the input bitstream
 *
 *  This function serves two main purposes.  First it applies the input_mask
 *  to the atr_input resulting in a cleaned up atr_input data stream.
 *  Secondly it computes the hash and stores it in the bkt_hash field at
 *  the end of the input byte stream.  This way it will be available for
 *  future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 14; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this had
	 * to be delayed because bit 0 of the stream should not be processed,
	 * so we do not add the VLAN until after bit 0 has been processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}

1622 /**
1623  *  ixgbe_get_fdirtcpm_82599 - generate the FDIRTCPM port masks from atr_input_masks
1624  *  @input_mask: mask to be bit swapped
1625  *
1626  *  The source and destination port masks for flow director are bit swapped
1627  *  in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
1628  *  and so on.  To generate a correctly swapped value we need to bit swap the
1629  *  mask, and that is what this function accomplishes.
1630  **/
1631 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1632 {
1633 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1634 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1635 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1636 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1637 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1638 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1639 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1640 }
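
/*
 * Worked example: a mask with src_port = 0xFFFF and dst_port = 0 yields
 * mask = 0x0000FFFF before the swap; bit reversing it gives 0xFFFF0000,
 * so the source-port mask lands in the destination-port half of
 * FDIRTCPM - exactly the "bit 15 affects bit 0" swap described above.
 */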
1641 
1642 /*
1643  * These macros address the fact that we have registers that are either
1644  * wholly or partly big-endian.  As a result, on big-endian systems we end up
1645  * byte swapping the value to little-endian before it is byte swapped again
1646  * and written to the hardware in the original big-endian format.
1648  */
1649 #define IXGBE_STORE_AS_BE32(_value) \
1650 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1651 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1652 
1653 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1654 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1655 
1656 #define IXGBE_STORE_AS_BE16(_value) \
1657 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
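
/*
 * Example: IXGBE_STORE_AS_BE32(0x11223344) evaluates to 0x44332211.
 * On a little-endian host IXGBE_NTOHL() performs the mirror swap, so
 * IXGBE_WRITE_REG_BE32() passes a network-order value through unchanged;
 * on a big-endian host IXGBE_NTOHL() is a no-op and the swap here
 * performs the conversion described above.
 */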
1658 
1659 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1660 				    union ixgbe_atr_input *input_mask, bool cloud_mode)
1661 {
1662 	/* mask IPv6 since it is currently not supported */
1663 	u32 fdirm = IXGBE_FDIRM_DIPv6;
1664 	u32 fdirtcpm;
1665 	u32 fdirip6m;
1666 	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
1667 
1668 	/*
1669 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1670 	 * are zero, then assume a full mask for that field.  Also assume that
1671 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1672 	 * cannot be masked out in this implementation.
1673 	 *
1674 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1675 	 * point in time.
1676 	 */
1677 
1678 	/* verify bucket hash is cleared on hash generation */
1679 	if (input_mask->formatted.bkt_hash)
1680 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
1681 
1682 	/* Program FDIRM and verify partial masks */
1683 	switch (input_mask->formatted.vm_pool & 0x7F) {
1684 	case 0x0:
1685 		fdirm |= IXGBE_FDIRM_POOL;
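		/* fall through */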
1686 	case 0x7F:
1687 		break;
1688 	default:
1689 		DEBUGOUT(" Error on vm pool mask\n");
1690 		return IXGBE_ERR_CONFIG;
1691 	}
1692 
1693 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1694 	case 0x0:
1695 		fdirm |= IXGBE_FDIRM_L4P;
1696 		if (input_mask->formatted.dst_port ||
1697 		    input_mask->formatted.src_port) {
1698 			DEBUGOUT(" Error on src/dst port mask\n");
1699 			return IXGBE_ERR_CONFIG;
1700 		}
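		/* fall through */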
1701 	case IXGBE_ATR_L4TYPE_MASK:
1702 		break;
1703 	default:
1704 		DEBUGOUT(" Error on flow type mask\n");
1705 		return IXGBE_ERR_CONFIG;
1706 	}
1707 
1708 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1709 	case 0x0000:
1710 		/* mask VLAN ID, fall through to mask VLAN priority */
1711 		fdirm |= IXGBE_FDIRM_VLANID;
1712 	case 0x0FFF:
1713 		/* mask VLAN priority */
1714 		fdirm |= IXGBE_FDIRM_VLANP;
1715 		break;
1716 	case 0xE000:
1717 		/* mask VLAN ID only, fall through */
1718 		fdirm |= IXGBE_FDIRM_VLANID;
1719 	case 0xEFFF:
1720 		/* no VLAN fields masked */
1721 		break;
1722 	default:
1723 		DEBUGOUT(" Error on VLAN mask\n");
1724 		return IXGBE_ERR_CONFIG;
1725 	}
1726 
1727 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1728 	case 0x0000:
1729 		/* Mask Flex Bytes, fall through */
1730 		fdirm |= IXGBE_FDIRM_FLEX;
1731 	case 0xFFFF:
1732 		break;
1733 	default:
1734 		DEBUGOUT(" Error on flexible byte mask\n");
1735 		return IXGBE_ERR_CONFIG;
1736 	}
1737 
1738 	if (cloud_mode) {
1739 		fdirm |= IXGBE_FDIRM_L3P;
1740 		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
1741 		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
1742 
1743 		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
1744 		case 0x00:
1745 			/* Mask inner MAC, fall through */
1746 			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
1747 		case 0xFF:
1748 			break;
1749 		default:
1750 			DEBUGOUT(" Error on inner_mac byte mask\n");
1751 			return IXGBE_ERR_CONFIG;
1752 		}
1753 
1754 		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
1755 		case 0x0:
1756 			/* Mask VXLAN ID */
1757 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
1758 			break;
1759 		case 0x00FFFFFF:
1760 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
1761 			break;
1762 		case 0xFFFFFFFF:
1763 			break;
1764 		default:
1765 			DEBUGOUT(" Error on TNI/VNI byte mask\n");
1766 			return IXGBE_ERR_CONFIG;
1767 		}
1768 
1769 		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
1770 		case 0x0:
1771 			/* Mask tunnel type, fall through */
1772 			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
1773 		case 0xFFFF:
1774 			break;
1775 		default:
1776 			DEBUGOUT(" Error on tunnel type byte mask\n");
1777 			return IXGBE_ERR_CONFIG;
1778 		}
1779 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
1780 
1781 		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
1782 		 * FDIRDIP4M in cloud mode so that the inner L3/L4 fields
1783 		 * of tunneled packets are fully masked.
1784 		 */
1785 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
1786 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
1787 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
1788 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
1789 	}
1790 
1791 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1792 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1793 
1794 	if (!cloud_mode) {
1795 		/* store the TCP/UDP port masks, bit reversed from port
1796 		 * layout */
1797 		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1798 
1799 		/* write both the same so that UDP and TCP use the same mask */
1800 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1801 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1802 		/* also use it for SCTP */
1803 		switch (hw->mac.type) {
1804 		case ixgbe_mac_X550:
1805 		case ixgbe_mac_X550EM_x:
1806 		case ixgbe_mac_X550EM_a:
1807 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1808 			break;
1809 		default:
1810 			break;
1811 		}
1812 
1813 		/* store source and destination IP masks (big-endian) */
1814 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1815 				     ~input_mask->formatted.src_ip[0]);
1816 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1817 				     ~input_mask->formatted.dst_ip[0]);
1818 	}
1819 	return IXGBE_SUCCESS;
1820 }
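
/*
 * Usage sketch (illustrative, not part of the driver): an input mask
 * that exact-matches the IPv4 addresses and L4 ports of a TCP flow
 * while leaving VLAN, flex bytes and VM pool masked out (per the policy
 * above, all-zero mask fields mean "fully masked").  memset() is
 * assumed available from the kernel environment.
 */
static inline void
ixgbe_example_tcpv4_input_mask(union ixgbe_atr_input *mask)
{
	memset(mask, 0, sizeof(*mask));
	mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				    IXGBE_ATR_L4TYPE_MASK;
	mask->formatted.src_ip[0] = 0xFFFFFFFF;	/* match entire src IP */
	mask->formatted.dst_ip[0] = 0xFFFFFFFF;	/* match entire dst IP */
	mask->formatted.src_port = 0xFFFF;	/* match src port */
	mask->formatted.dst_port = 0xFFFF;	/* match dst port */
}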
1821 
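/**
 *  ixgbe_fdir_write_perfect_filter_82599 - write a perfect filter to hardware
 *  @hw: pointer to hardware structure
 *  @input: input bitstream, already masked and with bkt_hash computed
 *  @soft_id: software index for the filter
 *  @queue: queue index to direct traffic to
 *  @cloud_mode: TRUE when programming a filter for a tunneled (cloud) flow
 **/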
1822 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1823 					  union ixgbe_atr_input *input,
1824 					  u16 soft_id, u8 queue, bool cloud_mode)
1825 {
1826 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
1827 	u32 addr_low, addr_high;
1828 	u32 cloud_type = 0;
1829 	s32 err;
1830 
1831 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1832 	if (!cloud_mode) {
1833 		/* currently IPv6 is not supported, must be programmed with 0 */
1834 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1835 				     input->formatted.src_ip[0]);
1836 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1837 				     input->formatted.src_ip[1]);
1838 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1839 				     input->formatted.src_ip[2]);
1840 
1841 		/* record the source address (big-endian) */
1842 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
1843 			input->formatted.src_ip[0]);
1844 
1845 		/* record the first 32 bits of the destination address
1846 		 * (big-endian) */
1847 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
1848 			input->formatted.dst_ip[0]);
1849 
1850 		/* record source and destination port (little-endian) */
1851 		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1852 		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1853 		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1854 		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1855 	}
1856 
1857 	/* record VLAN (little-endian) and flex_bytes (big-endian) */
1858 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1859 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1860 	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1861 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1862 
1863 	if (cloud_mode) {
1864 		if (input->formatted.tunnel_type != 0)
1865 			cloud_type = 0x80000000;
1866 
1867 		addr_low = ((u32)input->formatted.inner_mac[0] |
1868 				((u32)input->formatted.inner_mac[1] << 8) |
1869 				((u32)input->formatted.inner_mac[2] << 16) |
1870 				((u32)input->formatted.inner_mac[3] << 24));
1871 		addr_high = ((u32)input->formatted.inner_mac[4] |
1872 				((u32)input->formatted.inner_mac[5] << 8));
1873 		cloud_type |= addr_high;
1874 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
1875 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
1876 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
1877 	}
1878 
1879 	/* configure FDIRHASH register */
1880 	fdirhash = input->formatted.bkt_hash;
1881 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1882 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1883 
1884 	/*
1885 	 * flush all previous writes to make certain registers are
1886 	 * programmed prior to issuing the command
1887 	 */
1888 	IXGBE_WRITE_FLUSH(hw);
1889 
1890 	/* configure FDIRCMD register */
1891 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1892 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1893 	if (queue == IXGBE_FDIR_DROP_QUEUE)
1894 		fdircmd |= IXGBE_FDIRCMD_DROP;
1895 	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
1896 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1897 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1898 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1899 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1900 
1901 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1902 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1903 	if (err) {
1904 		DEBUGOUT("Flow Director command did not complete!\n");
1905 		return err;
1906 	}
1907 
1908 	return IXGBE_SUCCESS;
1909 }
1910 
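/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter from hardware
 *  @hw: pointer to hardware structure
 *  @input: input bitstream, used for its bkt_hash
 *  @soft_id: software index of the filter to remove
 **/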
1911 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1912 					  union ixgbe_atr_input *input,
1913 					  u16 soft_id)
1914 {
1915 	u32 fdirhash;
1916 	u32 fdircmd;
1917 	s32 err;
1918 
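	DEBUGFUNC("ixgbe_fdir_erase_perfect_filter_82599");
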
1919 	/* configure FDIRHASH register */
1920 	fdirhash = input->formatted.bkt_hash;
1921 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1922 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1923 
1924 	/* flush hash to HW */
1925 	IXGBE_WRITE_FLUSH(hw);
1926 
1927 	/* Query if filter is present */
1928 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1929 
1930 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1931 	if (err) {
1932 		DEBUGOUT("Flow Director command did not complete!\n");
1933 		return err;
1934 	}
1935 
1936 	/* if filter exists in hardware then remove it */
1937 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1938 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1939 		IXGBE_WRITE_FLUSH(hw);
1940 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1941 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1942 	}
1943 
1944 	return IXGBE_SUCCESS;
1945 }
1946 
1947 /**
1948  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1949  *  @hw: pointer to hardware structure
1950  *  @input: input bitstream
1951  *  @input_mask: mask for the input bitstream
1952  *  @soft_id: software index for the filters
1953  *  @queue: queue index to direct traffic to
 *  @cloud_mode: TRUE when programming a filter for a tunneled (cloud) flow
1954  *
1955  *  Note that the caller to this function must lock before calling, since the
1956  *  hardware writes must be protected from one another.
1957  **/
1958 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1959 					union ixgbe_atr_input *input,
1960 					union ixgbe_atr_input *input_mask,
1961 					u16 soft_id, u8 queue, bool cloud_mode)
1962 {
1963 	s32 err = IXGBE_ERR_CONFIG;
1964 
1965 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1966 
1967 	/*
1968 	 * Check flow_type formatting, and bail out before we touch the hardware
1969 	 * if there's a configuration issue
1970 	 */
1971 	switch (input->formatted.flow_type) {
1972 	case IXGBE_ATR_FLOW_TYPE_IPV4:
1973 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
1974 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1975 		if (input->formatted.dst_port || input->formatted.src_port) {
1976 			DEBUGOUT(" Error on src/dst port\n");
1977 			return IXGBE_ERR_CONFIG;
1978 		}
1979 		break;
1980 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1981 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
1982 		if (input->formatted.dst_port || input->formatted.src_port) {
1983 			DEBUGOUT(" Error on src/dst port\n");
1984 			return IXGBE_ERR_CONFIG;
1985 		}
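		/* fall through */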
1986 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1987 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
1988 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1989 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
1990 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1991 						  IXGBE_ATR_L4TYPE_MASK;
1992 		break;
1993 	default:
1994 		DEBUGOUT(" Error on flow type input\n");
1995 		return err;
1996 	}
1997 
1998 	/* program input mask into the HW */
1999 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2000 	if (err)
2001 		return err;
2002 
2003 	/* apply mask and compute/store hash */
2004 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2005 
2006 	/* program filters to filter memory */
2007 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2008 						     soft_id, queue, cloud_mode);
2009 }
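
/*
 * Usage sketch (illustrative, not part of the driver): steer TCPv4
 * traffic destined to 192.168.1.20:80 into Rx queue 3 as software
 * filter id 1, wildcarding the source address and port.  The caller
 * must hold the lock noted above and have initialized Flow Director in
 * perfect-filter mode.  IXGBE_NTOHL()/IXGBE_NTOHS() are used purely as
 * byte swaps to network order on little-endian hosts.
 */
static inline s32 ixgbe_example_add_tcpv4_filter(struct ixgbe_hw *hw)
{
	union ixgbe_atr_input input, mask;

	memset(&input, 0, sizeof(input));
	memset(&mask, 0, sizeof(mask));

	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	input.formatted.dst_ip[0] = IXGBE_NTOHL(0xC0A80114); /* 192.168.1.20 */
	input.formatted.dst_port = IXGBE_NTOHS(80);

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;
	mask.formatted.dst_ip[0] = 0xFFFFFFFF;
	mask.formatted.dst_port = 0xFFFF;

	return ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
						   1, 3, FALSE);
}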
2010 
2011 /**
2012  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2013  *  @hw: pointer to hardware structure
2014  *  @reg: analog register to read
2015  *  @val: read value
2016  *
2017  *  Performs a read operation on the specified Omer analog register.
2018  **/
2019 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2020 {
2021 	u32  core_ctl;
2022 
2023 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2024 
2025 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2026 			(reg << 8));
2027 	IXGBE_WRITE_FLUSH(hw);
2028 	usec_delay(10);
2029 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2030 	*val = (u8)core_ctl;
2031 
2032 	return IXGBE_SUCCESS;
2033 }
2034 
2035 /**
2036  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2037  *  @hw: pointer to hardware structure
2038  *  @reg: analog register to write
2039  *  @val: value to write
2040  *
2041  *  Performs a write operation on the specified Omer analog register.
2042  **/
2043 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2044 {
2045 	u32  core_ctl;
2046 
2047 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2048 
2049 	core_ctl = (reg << 8) | val;
2050 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2051 	IXGBE_WRITE_FLUSH(hw);
2052 	usec_delay(10);
2053 
2054 	return IXGBE_SUCCESS;
2055 }
2056 
2057 /**
2058  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2059  *  @hw: pointer to hardware structure
2060  *
2061  *  Starts the hardware using the generic start_hw function
2062  *  and the generation-2 start_hw function, then performs any
2063  *  revision-specific operations.
2064  **/
2065 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2066 {
2067 	s32 ret_val = IXGBE_SUCCESS;
2068 
2069 	DEBUGFUNC("ixgbe_start_hw_82599");
2070 
2071 	ret_val = ixgbe_start_hw_generic(hw);
2072 	if (ret_val != IXGBE_SUCCESS)
2073 		goto out;
2074 
2075 	ret_val = ixgbe_start_hw_gen2(hw);
2076 	if (ret_val != IXGBE_SUCCESS)
2077 		goto out;
2078 
2079 	/* We need to run link autotry after the driver loads */
2080 	hw->mac.autotry_restart = TRUE;
2081 
2082 	ret_val = ixgbe_verify_fw_version_82599(hw);
2084 out:
2085 	return ret_val;
2086 }
2087 
2088 /**
2089  *  ixgbe_identify_phy_82599 - Get physical layer module
2090  *  @hw: pointer to hardware structure
2091  *
2092  *  Determines the physical layer module found on the current adapter.
2093  *  If PHY already detected, maintains current PHY type in hw struct,
2094  *  otherwise executes the PHY detection routine.
2095  **/
2096 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2097 {
2098 	s32 status;
2099 
2100 	DEBUGFUNC("ixgbe_identify_phy_82599");
2101 
2102 	/* Detect PHY if not unknown - returns success if already detected. */
2103 	status = ixgbe_identify_phy_generic(hw);
2104 	if (status != IXGBE_SUCCESS) {
2105 		/* 82599 10GBASE-T requires an external PHY */
2106 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2107 			return status;
2108 		else
2109 			status = ixgbe_identify_module_generic(hw);
2110 	}
2111 
2112 	/* Set PHY type none if no PHY detected */
2113 	if (hw->phy.type == ixgbe_phy_unknown) {
2114 		hw->phy.type = ixgbe_phy_none;
2115 		return IXGBE_SUCCESS;
2116 	}
2117 
2118 	/* Return error if SFP module has been detected but is not supported */
2119 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2120 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
2121 
2122 	return status;
2123 }
2124 
2125 /**
2126  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2127  *  @hw: pointer to hardware structure
2128  *
2129  *  Determines physical layer capabilities of the current configuration.
2130  **/
2131 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2132 {
2133 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2134 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2135 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2136 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2137 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2138 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2139 	u16 ext_ability = 0;
2140 
2141 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2142 
2143 	hw->phy.ops.identify(hw);
2144 
2145 	switch (hw->phy.type) {
2146 	case ixgbe_phy_tn:
2147 	case ixgbe_phy_cu_unknown:
2148 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2149 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2150 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2151 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2152 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2153 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2154 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2155 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2156 		goto out;
2157 	default:
2158 		break;
2159 	}
2160 
2161 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2162 	case IXGBE_AUTOC_LMS_1G_AN:
2163 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2164 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2165 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2166 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2167 			goto out;
2168 		} else
2169 			/* SFI mode so read SFP module */
2170 			goto sfp_check;
2172 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2173 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2174 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2175 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2176 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2177 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2178 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2179 		goto out;
2181 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2182 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2183 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2184 			goto out;
2185 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2186 			goto sfp_check;
2187 		break;
2188 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2189 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2190 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2191 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2192 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2193 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2194 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2195 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2196 		goto out;
2198 	default:
2199 		goto out;
2201 	}
2202 
2203 sfp_check:
2204 	/* SFP check must be done last since DA modules are sometimes used to
2205 	 * test KR mode - we need to identify KR mode correctly before the SFP
2206 	 * module.  Call identify_sfp because the pluggable module may have changed */
2207 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2208 out:
2209 	return physical_layer;
2210 }
2211 
2212 /**
2213  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2214  *  @hw: pointer to hardware structure
2215  *  @regval: register value to write to RXCTRL
2216  *
2217  *  Enables the Rx DMA unit for 82599
2218  **/
2219 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2220 {
2221 
2222 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2223 
2224 	/*
2225 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2226 	 * If traffic is incoming before we enable the Rx unit, it could hang
2227 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2228 	 * completely disabled prior to enabling the Rx unit.
2229 	 */
2230 
2231 	hw->mac.ops.disable_sec_rx_path(hw);
2232 
2233 	if (regval & IXGBE_RXCTRL_RXEN)
2234 		ixgbe_enable_rx(hw);
2235 	else
2236 		ixgbe_disable_rx(hw);
2237 
2238 	hw->mac.ops.enable_sec_rx_path(hw);
2239 
2240 	return IXGBE_SUCCESS;
2241 }
2242 
2243 /**
2244  *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
2245  *  @hw: pointer to hardware structure
2246  *
2247  *  Verifies that the installed firmware version is 0.6 or higher
2248  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2249  *
2250  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2251  *  if the FW version is not supported.
2252  **/
2253 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2254 {
2255 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2256 	u16 fw_offset, fw_ptp_cfg_offset;
2257 	u16 fw_version;
2258 
2259 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2260 
2261 	/* firmware check is only necessary for SFI devices */
2262 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2263 		status = IXGBE_SUCCESS;
2264 		goto fw_version_out;
2265 	}
2266 
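	/*
	 * The version word sits at the end of an EEPROM pointer chain:
	 * word IXGBE_FW_PTR holds the offset of the Firmware Module
	 * block, which holds the offset of the Pass Through Patch
	 * Configuration block, which in turn holds the version word at
	 * IXGBE_FW_PATCH_VERSION_4.  A pointer of 0x0000 or 0xFFFF
	 * means the block is absent.
	 */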
2267 	/* get the offset to the Firmware Module block */
2268 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2269 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2270 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
2271 		return IXGBE_ERR_EEPROM_VERSION;
2272 	}
2273 
2274 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2275 		goto fw_version_out;
2276 
2277 	/* get the offset to the Pass Through Patch Configuration block */
2278 	if (hw->eeprom.ops.read(hw, (fw_offset +
2279 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2280 				 &fw_ptp_cfg_offset)) {
2281 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2282 			      "eeprom read at offset %d failed",
2283 			      fw_offset +
2284 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2285 		return IXGBE_ERR_EEPROM_VERSION;
2286 	}
2287 
2288 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2289 		goto fw_version_out;
2290 
2291 	/* get the firmware version */
2292 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2293 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2294 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2295 			      "eeprom read at offset %d failed",
2296 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2297 		return IXGBE_ERR_EEPROM_VERSION;
2298 	}
2299 
2300 	if (fw_version > 0x5)
2301 		status = IXGBE_SUCCESS;
2302 
2303 fw_version_out:
2304 	return status;
2305 }
2306 
2307 /**
2308  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2309  *  @hw: pointer to hardware structure
2310  *
2311  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
2312  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2313  **/
2314 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2315 {
2316 	bool lesm_enabled = FALSE;
2317 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2318 	s32 status;
2319 
2320 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2321 
2322 	/* get the offset to the Firmware Module block */
2323 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2324 
2325 	if ((status != IXGBE_SUCCESS) ||
2326 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
2327 		goto out;
2328 
2329 	/* get the offset to the LESM Parameters block */
2330 	status = hw->eeprom.ops.read(hw, (fw_offset +
2331 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2332 				     &fw_lesm_param_offset);
2333 
2334 	if ((status != IXGBE_SUCCESS) ||
2335 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2336 		goto out;
2337 
2338 	/* get the LESM state word */
2339 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2340 				     IXGBE_FW_LESM_STATE_1),
2341 				     &fw_lesm_state);
2342 
2343 	if ((status == IXGBE_SUCCESS) &&
2344 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2345 		lesm_enabled = TRUE;
2346 
2347 out:
2348 	return lesm_enabled;
2349 }
2350 
2351 /**
2352  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2353  *  fastest available method
2354  *
2355  *  @hw: pointer to hardware structure
2356  *  @offset: offset of word in EEPROM to read
2357  *  @words: number of words
2358  *  @data: word(s) read from the EEPROM
2359  *
2360  *  Retrieves 16 bit word(s) read from EEPROM
2361  **/
2362 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2363 					  u16 words, u16 *data)
2364 {
2365 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2366 	s32 ret_val = IXGBE_ERR_CONFIG;
2367 
2368 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2369 
2370 	/*
2371 	 * If EEPROM is detected and can be addressed using 14 bits,
2372 	 * use EERD; otherwise use bit bang
2373 	 */
2374 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2375 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2376 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2377 							 data);
2378 	else
2379 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2380 								    words,
2381 								    data);
2382 
2383 	return ret_val;
2384 }
2385 
2386 /**
2387  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
2388  *  fastest available method
2389  *
2390  *  @hw: pointer to hardware structure
2391  *  @offset: offset of word in the EEPROM to read
2392  *  @data: word read from the EEPROM
2393  *
2394  *  Reads a 16 bit word from the EEPROM
2395  **/
2396 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2397 				   u16 offset, u16 *data)
2398 {
2399 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2400 	s32 ret_val = IXGBE_ERR_CONFIG;
2401 
2402 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2403 
2404 	/*
2405 	 * If EEPROM is detected and can be addressed using 14 bits,
2406 	 * use EERD; otherwise use bit bang
2407 	 */
2408 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2409 	    (offset <= IXGBE_EERD_MAX_ADDR))
2410 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2411 	else
2412 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2413 
2414 	return ret_val;
2415 }
2416 
2417 /**
2418  * ixgbe_reset_pipeline_82599 - perform pipeline reset
2419  *
2420  *  @hw: pointer to hardware structure
2421  *
2422  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2423  * full pipeline reset.  This function assumes the SW/FW lock is held.
2424  **/
2425 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2426 {
2427 	s32 ret_val;
2428 	u32 anlp1_reg = 0;
2429 	u32 i, autoc_reg, autoc2_reg;
2430 
2431 	/* Enable link if disabled in NVM */
2432 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2433 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2434 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2435 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2436 		IXGBE_WRITE_FLUSH(hw);
2437 	}
2438 
2439 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2440 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2441 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2442 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2443 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2444 	/* Wait for AN to leave state 0 */
2445 	for (i = 0; i < 10; i++) {
2446 		msec_delay(4);
2447 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2448 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2449 			break;
2450 	}
2451 
2452 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2453 		DEBUGOUT("auto negotiation not completed\n");
2454 		ret_val = IXGBE_ERR_RESET_FAILED;
2455 		goto reset_pipeline_out;
2456 	}
2457 
2458 	ret_val = IXGBE_SUCCESS;
2459 
2460 reset_pipeline_out:
2461 	/* Write AUTOC register with original LMS field and Restart_AN */
2462 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2463 	IXGBE_WRITE_FLUSH(hw);
2464 
2465 	return ret_val;
2466 }
2467 
2468 
2469 /**
2470  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2471  *  @hw: pointer to hardware structure
2472  *  @byte_offset: byte offset to read
 *  @dev_addr: I2C bus address of the device to read from
2473  *  @data: value read
2474  *
2475  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
2476  *  a specified device address.
2477  **/
2478 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2479 				u8 dev_addr, u8 *data)
2480 {
2481 	u32 esdp;
2482 	s32 status;
2483 	s32 timeout = 200;
2484 
2485 	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2486 
2487 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2488 		/* Acquire I2C bus ownership. */
2489 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2490 		esdp |= IXGBE_ESDP_SDP0;
2491 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2492 		IXGBE_WRITE_FLUSH(hw);
2493 
2494 		while (timeout) {
2495 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2496 			if (esdp & IXGBE_ESDP_SDP1)
2497 				break;
2498 
2499 			msec_delay(5);
2500 			timeout--;
2501 		}
2502 
2503 		if (!timeout) {
2504 			DEBUGOUT("Driver can't access resource,"
2505 				 " acquiring I2C bus timeout.\n");
2506 			status = IXGBE_ERR_I2C;
2507 			goto release_i2c_access;
2508 		}
2509 	}
2510 
2511 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2512 
2513 release_i2c_access:
2514 
2515 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2516 		/* Release I2C bus ownership. */
2517 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2518 		esdp &= ~IXGBE_ESDP_SDP0;
2519 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2520 		IXGBE_WRITE_FLUSH(hw);
2521 	}
2522 
2523 	return status;
2524 }
2525 
2526 /**
2527  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2528  *  @hw: pointer to hardware structure
2529  *  @byte_offset: byte offset to write
 *  @dev_addr: I2C bus address of the device to write to
2530  *  @data: value to write
2531  *
2532  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
2533  *  a specified device address.
2534  **/
2535 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2536 				 u8 dev_addr, u8 data)
2537 {
2538 	u32 esdp;
2539 	s32 status;
2540 	s32 timeout = 200;
2541 
2542 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2543 
2544 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2545 		/* Acquire I2C bus ownership. */
2546 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2547 		esdp |= IXGBE_ESDP_SDP0;
2548 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2549 		IXGBE_WRITE_FLUSH(hw);
2550 
2551 		while (timeout) {
2552 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2553 			if (esdp & IXGBE_ESDP_SDP1)
2554 				break;
2555 
2556 			msec_delay(5);
2557 			timeout--;
2558 		}
2559 
2560 		if (!timeout) {
2561 			DEBUGOUT("Driver can't access resource,"
2562 				 " acquiring I2C bus timeout.\n");
2563 			status = IXGBE_ERR_I2C;
2564 			goto release_i2c_access;
2565 		}
2566 	}
2567 
2568 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2569 
2570 release_i2c_access:
2571 
2572 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2573 		/* Release I2C bus ownership. */
2574 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2575 		esdp &= ~IXGBE_ESDP_SDP0;
2576 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2577 		IXGBE_WRITE_FLUSH(hw);
2578 	}
2579 
2580 	return status;
2581 }
2582