xref: /freebsd/sys/dev/ixgbe/ixgbe_82599.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
39 
40 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
41 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
42                                       ixgbe_link_speed *speed,
43                                       bool *autoneg);
44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
45 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
46                                      ixgbe_link_speed speed, bool autoneg,
47                                      bool autoneg_wait_to_complete);
48 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
49 				     ixgbe_link_speed speed, bool autoneg,
50 				     bool autoneg_wait_to_complete);
51 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
52 				bool autoneg_wait_to_complete);
53 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
54                                      ixgbe_link_speed speed,
55                                      bool autoneg,
56                                      bool autoneg_wait_to_complete);
57 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
58                                                ixgbe_link_speed speed,
59                                                bool autoneg,
60                                                bool autoneg_wait_to_complete);
61 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
62 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
63 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
64 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
65 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
66 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
67 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
68 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
69 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
70 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
71 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
72 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
73 
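/**
 *  ixgbe_init_mac_link_ops_82599 - Init MAC link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Selects the setup_link routine: multispeed fiber SFP+ modules use the
 *  multispeed routine, KX4/KX/KR backplanes with SmartSpeed enabled use the
 *  SmartSpeed routine, and everything else uses the default 82599 routine.
 **/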
74 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
75 {
76 	struct ixgbe_mac_info *mac = &hw->mac;
77 
78 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
79 
80 	if (hw->phy.multispeed_fiber) {
81 		/* Set up dual speed SFP+ support */
82 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
83 	} else {
84 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
85 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
86 		      hw->phy.smart_speed == ixgbe_smart_speed_on))
87 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
88 		else
89 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
90 	}
91 }
92 
93 /**
94  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
95  *  @hw: pointer to hardware structure
96  *
97  *  Initialize any function pointers that were not able to be
98  *  set during init_shared_code because the PHY/SFP type was
99  *  not known.  Perform the SFP init if necessary.
100  *
101  **/
102 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
103 {
104 	struct ixgbe_mac_info *mac = &hw->mac;
105 	struct ixgbe_phy_info *phy = &hw->phy;
106 	s32 ret_val = IXGBE_SUCCESS;
107 
108 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
109 
110 	/* Identify the PHY or SFP module */
111 	ret_val = phy->ops.identify(hw);
112 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
113 		goto init_phy_ops_out;
114 
115 	/* Setup function pointers based on detected SFP module and speeds */
116 	ixgbe_init_mac_link_ops_82599(hw);
117 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
118 		hw->phy.ops.reset = NULL;
119 
120 	/* If copper media, overwrite with copper function pointers */
121 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
122 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
123 		mac->ops.get_link_capabilities =
124 		                  &ixgbe_get_copper_link_capabilities_generic;
125 	}
126 
127 	/* Set necessary function pointers based on phy type */
128 	switch (hw->phy.type) {
129 	case ixgbe_phy_tn:
130 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
131 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
132 		phy->ops.get_firmware_version =
133 		             &ixgbe_get_phy_firmware_version_tnx;
134 		break;
135 	case ixgbe_phy_aq:
136 		phy->ops.get_firmware_version =
137 		             &ixgbe_get_phy_firmware_version_generic;
138 		break;
139 	default:
140 		break;
141 	}
142 init_phy_ops_out:
143 	return ret_val;
144 }
145 
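/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Applies the SFP init sequence from the EEPROM: each word in the init
 *  list is written to CORECTL until the 0xffff terminator is read, then
 *  autonegotiation is restarted.  The MAC/CSR semaphore is held for the
 *  duration of the sequence.
 **/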
146 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
147 {
148 	s32 ret_val = IXGBE_SUCCESS;
149 	u16 list_offset, data_offset, data_value;
150 
151 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
152 
153 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
154 		ixgbe_init_mac_link_ops_82599(hw);
155 
156 		hw->phy.ops.reset = NULL;
157 
158 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
159 		                                              &data_offset);
160 		if (ret_val != IXGBE_SUCCESS)
161 			goto setup_sfp_out;
162 
163 		/* PHY config will finish before releasing the semaphore */
164 		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
165 		if (ret_val != IXGBE_SUCCESS) {
166 			ret_val = IXGBE_ERR_SWFW_SYNC;
167 			goto setup_sfp_out;
168 		}
169 
170 		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
171 		while (data_value != 0xffff) {
172 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
173 			IXGBE_WRITE_FLUSH(hw);
174 			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
175 		}
176 		/* Now restart DSP by setting Restart_AN */
177 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
178 		    (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
179 
180 		/* Release the semaphore */
181 		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
182 		/* Delay obtaining semaphore again to allow FW access */
183 		msec_delay(hw->eeprom.semaphore_delay);
184 	}
185 
186 setup_sfp_out:
187 	return ret_val;
188 }
189 
190 /**
191  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
192  *  @hw: pointer to hardware structure
193  *
194  *  Initialize the function pointers and assign the MAC type for 82599.
195  *  Does not touch the hardware.
196  **/
197 
198 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
199 {
200 	struct ixgbe_mac_info *mac = &hw->mac;
201 	struct ixgbe_phy_info *phy = &hw->phy;
202 	s32 ret_val;
203 
204 	DEBUGFUNC("ixgbe_init_ops_82599");
205 
206 	ret_val = ixgbe_init_phy_ops_generic(hw);
207 	ret_val = ixgbe_init_ops_generic(hw);
208 
209 	/* PHY */
210 	phy->ops.identify = &ixgbe_identify_phy_82599;
211 	phy->ops.init = &ixgbe_init_phy_ops_82599;
212 
213 	/* MAC */
214 	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
215 	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
216 	mac->ops.get_supported_physical_layer =
217 	                            &ixgbe_get_supported_physical_layer_82599;
218 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
219 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
220 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
221 	mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
222 	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
223 	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
224 	mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
225 	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
226 
227 	/* RAR, Multicast, VLAN */
228 	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
229 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
230 	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
231 	mac->rar_highwater = 1;
232 	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
233 	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
234 	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
235 	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
236 
237 	/* Link */
238 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
239 	mac->ops.check_link            = &ixgbe_check_mac_link_generic;
240 	ixgbe_init_mac_link_ops_82599(hw);
241 
242 	mac->mcft_size        = 128;
243 	mac->vft_size         = 128;
244 	mac->num_rar_entries  = 128;
245 	mac->max_tx_queues    = 128;
246 	mac->max_rx_queues    = 128;
247 	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
248 
249 
250 	return ret_val;
251 }
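
/*
 * Note (illustrative, not part of the original file): in the shared-code
 * API this init routine is normally reached through ixgbe_init_shared_code()
 * in ixgbe_api.c, which dispatches on hw->mac.type, e.g.
 *
 *	case ixgbe_mac_82599EB:
 *		status = ixgbe_init_ops_82599(hw);
 *		break;
 */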
252 
253 /**
254  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
255  *  @hw: pointer to hardware structure
256  *  @speed: pointer to link speed
257  *  @negotiation: TRUE when autoneg or autotry is enabled
258  *
259  *  Determines the link capabilities by reading the AUTOC register.
260  **/
261 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
262                                       ixgbe_link_speed *speed,
263                                       bool *negotiation)
264 {
265 	s32 status = IXGBE_SUCCESS;
266 	u32 autoc = 0;
267 
268 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
269 
270 	/*
271 	 * Determine link capabilities based on the stored value of AUTOC,
272 	 * which represents EEPROM defaults.  If AUTOC value has not
273 	 * been stored, use the current register values.
274 	 */
275 	if (hw->mac.orig_link_settings_stored)
276 		autoc = hw->mac.orig_autoc;
277 	else
278 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
279 
280 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
281 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
282 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
283 		*negotiation = FALSE;
284 		break;
285 
286 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
287 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
288 		*negotiation = FALSE;
289 		break;
290 
291 	case IXGBE_AUTOC_LMS_1G_AN:
292 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
293 		*negotiation = TRUE;
294 		break;
295 
296 	case IXGBE_AUTOC_LMS_10G_SERIAL:
297 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
298 		*negotiation = FALSE;
299 		break;
300 
301 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
302 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
303 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
304 		if (autoc & IXGBE_AUTOC_KR_SUPP)
305 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
306 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
307 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
308 		if (autoc & IXGBE_AUTOC_KX_SUPP)
309 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
310 		*negotiation = TRUE;
311 		break;
312 
313 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
314 		*speed = IXGBE_LINK_SPEED_100_FULL;
315 		if (autoc & IXGBE_AUTOC_KR_SUPP)
316 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
317 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
318 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
319 		if (autoc & IXGBE_AUTOC_KX_SUPP)
320 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
321 		*negotiation = TRUE;
322 		break;
323 
324 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
325 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
326 		*negotiation = FALSE;
327 		break;
328 
329 	default:
330 		status = IXGBE_ERR_LINK_SETUP;
331 		goto out;
332 		break;
333 	}
334 
335 	if (hw->phy.multispeed_fiber) {
336 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
337 		          IXGBE_LINK_SPEED_1GB_FULL;
338 		*negotiation = TRUE;
339 	}
340 
341 out:
342 	return status;
343 }
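
/*
 * Illustrative sketch (not part of the original file): callers typically
 * query the capabilities and feed the result back into setup_link, e.g.
 *
 *	ixgbe_link_speed speed;
 *	bool negotiate;
 *
 *	if (hw->mac.ops.get_link_capabilities(hw, &speed, &negotiate) ==
 *	    IXGBE_SUCCESS)
 *		hw->mac.ops.setup_link(hw, speed, negotiate, TRUE);
 */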
344 
345 /**
346  *  ixgbe_get_media_type_82599 - Get media type
347  *  @hw: pointer to hardware structure
348  *
349  *  Returns the media type (fiber, copper, backplane)
350  **/
351 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
352 {
353 	enum ixgbe_media_type media_type;
354 
355 	DEBUGFUNC("ixgbe_get_media_type_82599");
356 
357 	/* Detect if there is a copper PHY attached. */
358 	if (hw->phy.type == ixgbe_phy_cu_unknown ||
359 	    hw->phy.type == ixgbe_phy_tn ||
360 	    hw->phy.type == ixgbe_phy_aq) {
361 		media_type = ixgbe_media_type_copper;
362 		goto out;
363 	}
364 
365 	switch (hw->device_id) {
366 	case IXGBE_DEV_ID_82599_KX4:
367 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
368 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
369 	case IXGBE_DEV_ID_82599_XAUI_LOM:
370 		/* Default device ID is mezzanine card KX/KX4 */
371 		media_type = ixgbe_media_type_backplane;
372 		break;
373 	case IXGBE_DEV_ID_82599_SFP:
374 		media_type = ixgbe_media_type_fiber;
375 		break;
376 	case IXGBE_DEV_ID_82599_CX4:
377 		media_type = ixgbe_media_type_cx4;
378 		break;
379 	default:
380 		media_type = ixgbe_media_type_unknown;
381 		break;
382 	}
383 out:
384 	return media_type;
385 }
386 
387 /**
388  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
389  *  @hw: pointer to hardware structure
390  *
391  *  Configures link settings based on values in the ixgbe_hw struct.
392  *  Restarts the link.  Performs autonegotiation if needed.
393  **/
394 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
395                                bool autoneg_wait_to_complete)
396 {
397 	u32 autoc_reg;
398 	u32 links_reg;
399 	u32 i;
400 	s32 status = IXGBE_SUCCESS;
401 
402 	DEBUGFUNC("ixgbe_start_mac_link_82599");
403 
404 
405 	/* Restart link */
406 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
407 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
408 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
409 
410 	/* Only poll for autoneg to complete if specified to do so */
411 	if (autoneg_wait_to_complete) {
412 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
413 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
414 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
415 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
416 		    || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
417 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
418 			links_reg = 0; /* Just in case Autoneg time = 0 */
419 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
420 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
421 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
422 					break;
423 				msec_delay(100);
424 			}
425 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
426 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
427 				DEBUGOUT("Autoneg did not complete.\n");
428 			}
429 		}
430 	}
431 
432 	/* Add delay to filter out noise during initial link setup */
433 	msec_delay(50);
434 
435 	return status;
436 }
437 
438 /**
439  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
440  *  @hw: pointer to hardware structure
441  *  @speed: new link speed
442  *  @autoneg: TRUE if autonegotiation enabled
443  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
444  *
445  *  Set the link speed in the AUTOC register and restarts link.
446  **/
447 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
448                                      ixgbe_link_speed speed, bool autoneg,
449                                      bool autoneg_wait_to_complete)
450 {
451 	s32 status = IXGBE_SUCCESS;
452 	ixgbe_link_speed link_speed;
453 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
454 	u32 speedcnt = 0;
455 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
456 	u32 i = 0;
457 	bool link_up = FALSE;
458 	bool negotiation;
459 
460 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
461 
462 	/* Mask off requested but non-supported speeds */
463 	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
464 	if (status != IXGBE_SUCCESS)
465 		return status;
466 
467 	speed &= link_speed;
468 
469 	/*
470 	 * When the driver changes the link speeds that it can support,
471 	 * it sets autotry_restart to TRUE to indicate that we need to
472 	 * initiate a new autotry session with the link partner.  To do
473 	 * so, we set the speed then disable and re-enable the tx laser, to
474 	 * alert the link partner that it also needs to restart autotry on its
475 	 * end.  This is consistent with true clause 37 autoneg, which also
476 	 * involves a loss of signal.
477 	 */
478 
479 	/*
480 	 * Try each speed one by one, highest priority first.  We do this in
481 	 * software because 10gb fiber doesn't support speed autonegotiation.
482 	 */
483 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
484 		speedcnt++;
485 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
486 
487 		/* If we already have link at this speed, just jump out */
488 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
489 		if (status != IXGBE_SUCCESS)
490 			return status;
491 
492 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
493 			goto out;
494 
495 		/* Set the module link speed */
496 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
497 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
498 
499 		/* Allow module to change analog characteristics (1G->10G) */
500 		msec_delay(40);
501 
502 		status = ixgbe_setup_mac_link_82599(
503 			hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
504 			autoneg_wait_to_complete);
505 		if (status != IXGBE_SUCCESS)
506 			return status;
507 
508 		/* Flap the tx laser if it has not already been done */
509 		if (hw->mac.autotry_restart) {
510 			/* Disable tx laser; allow 100us to go dark per spec */
511 			esdp_reg |= IXGBE_ESDP_SDP3;
512 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
513 			usec_delay(100);
514 
515 			/* Enable tx laser; allow 2ms to light up per spec */
516 			esdp_reg &= ~IXGBE_ESDP_SDP3;
517 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
518 			msec_delay(2);
519 
520 			hw->mac.autotry_restart = FALSE;
521 		}
522 
523 		/*
524 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
525 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
526 		 * attempted.  82599 uses the same timing for 10g SFI.
527 		 */
528 		for (i = 0; i < 5; i++) {
529 			/* Wait for the link partner to also set speed */
530 			msec_delay(100);
531 
532 			/* If we have link, just jump out */
533 			status = ixgbe_check_link(hw, &link_speed,
534 			                          &link_up, FALSE);
535 			if (status != IXGBE_SUCCESS)
536 				return status;
537 
538 			if (link_up)
539 				goto out;
540 		}
541 	}
542 
543 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
544 		speedcnt++;
545 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
546 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
547 
548 		/* If we already have link at this speed, just jump out */
549 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
550 		if (status != IXGBE_SUCCESS)
551 			return status;
552 
553 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
554 			goto out;
555 
556 		/* Set the module link speed */
557 		esdp_reg &= ~IXGBE_ESDP_SDP5;
558 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
559 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
560 
561 		/* Allow module to change analog characteristics (10G->1G) */
562 		msec_delay(40);
563 
564 		status = ixgbe_setup_mac_link_82599(
565 			hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
566 			autoneg_wait_to_complete);
567 		if (status != IXGBE_SUCCESS)
568 			return status;
569 
570 		/* Flap the tx laser if it has not already been done */
571 		if (hw->mac.autotry_restart) {
572 			/* Disable tx laser; allow 100us to go dark per spec */
573 			esdp_reg |= IXGBE_ESDP_SDP3;
574 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
575 			usec_delay(100);
576 
577 			/* Enable tx laser; allow 2ms to light up per spec */
578 			esdp_reg &= ~IXGBE_ESDP_SDP3;
579 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
580 			msec_delay(2);
581 
582 			hw->mac.autotry_restart = FALSE;
583 		}
584 
585 		/* Wait for the link partner to also set speed */
586 		msec_delay(100);
587 
588 		/* If we have link, just jump out */
589 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
590 		if (status != IXGBE_SUCCESS)
591 			return status;
592 
593 		if (link_up)
594 			goto out;
595 	}
596 
597 	/*
598 	 * We didn't get link.  Configure back to the highest speed we tried,
599 	 * (if there was more than one).  We call ourselves back with just the
600 	 * single highest speed that the user requested.
601 	 */
602 	if (speedcnt > 1)
603 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
604 		        highest_link_speed, autoneg, autoneg_wait_to_complete);
605 
606 out:
607 	/* Set autoneg_advertised value based on input link speed */
608 	hw->phy.autoneg_advertised = 0;
609 
610 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
611 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
612 
613 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
614 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
615 
616 	return status;
617 }
618 
619 /**
620  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
621  *  @hw: pointer to hardware structure
622  *  @speed: new link speed
623  *  @autoneg: TRUE if autonegotiation enabled
624  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
625  *
626  *  Implements the Intel SmartSpeed algorithm.
627  **/
628 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
629 				     ixgbe_link_speed speed, bool autoneg,
630 				     bool autoneg_wait_to_complete)
631 {
632 	s32 status = IXGBE_SUCCESS;
633 	ixgbe_link_speed link_speed;
634 	s32 i, j;
635 	bool link_up = FALSE;
636 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
637 
638 	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
639 
640 	 /* Set autoneg_advertised value based on input link speed */
641 	hw->phy.autoneg_advertised = 0;
642 
643 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
644 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
645 
646 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
647 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
648 
649 	if (speed & IXGBE_LINK_SPEED_100_FULL)
650 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
651 
652 	/*
653 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
654 	 * autoneg advertisement if link is unable to be established at the
655 	 * highest negotiated rate.  This can sometimes happen due to integrity
656 	 * issues with the physical media connection.
657 	 */
658 
659 	/* First, try to get link with full advertisement */
660 	hw->phy.smart_speed_active = FALSE;
661 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
662 		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
663 						    autoneg_wait_to_complete);
664 		if (status != IXGBE_SUCCESS)
665 			goto out;
666 
667 		/*
668 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
669 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
670 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
671 		 * Table 9 in the AN MAS.
672 		 */
673 		for (i = 0; i < 5; i++) {
674 			msec_delay(100);
675 
676 			/* If we have link, just jump out */
677 			status = ixgbe_check_link(hw, &link_speed, &link_up,
678 						  FALSE);
679 			if (status != IXGBE_SUCCESS)
680 				goto out;
681 
682 			if (link_up)
683 				goto out;
684 		}
685 	}
686 
687 	/*
688 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
689 	 * (or BX4/BX), then disable KR and try again.
690 	 */
691 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
692 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
693 		goto out;
694 
695 	/* Turn SmartSpeed on to disable KR support */
696 	hw->phy.smart_speed_active = TRUE;
697 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
698 					    autoneg_wait_to_complete);
699 	if (status != IXGBE_SUCCESS)
700 		goto out;
701 
702 	/*
703 	 * Wait for the controller to acquire link.  600ms will allow for
704 	 * the AN link_fail_inhibit_timer as well for multiple cycles of
705 	 * parallel detect, both 10g and 1g. This allows for the maximum
706 	 * connect attempts as defined in the AN MAS table 73-7.
707 	 */
708 	for (i = 0; i < 6; i++) {
709 		msec_delay(100);
710 
711 		/* If we have link, just jump out */
712 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
713 		if (status != IXGBE_SUCCESS)
714 			goto out;
715 
716 		if (link_up)
717 			goto out;
718 	}
719 
720 	/* We didn't get link.  Turn SmartSpeed back off. */
721 	hw->phy.smart_speed_active = FALSE;
722 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
723 					    autoneg_wait_to_complete);
724 
725 out:
726 	return status;
727 }
728 
729 /**
730  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
731  *  @hw: pointer to hardware structure
732  *  @speed: new link speed
733  *  @autoneg: TRUE if autonegotiation enabled
734  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
735  *
736  *  Set the link speed in the AUTOC register and restarts link.
737  **/
738 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
739                                      ixgbe_link_speed speed, bool autoneg,
740                                      bool autoneg_wait_to_complete)
741 {
742 	s32 status = IXGBE_SUCCESS;
743 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
744 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
745 	u32 start_autoc = autoc;
746 	u32 orig_autoc = 0;
747 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
748 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
749 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
750 	u32 links_reg;
751 	u32 i;
752 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
753 
754 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
755 
756 	/* Check to see if speed passed in is supported. */
757 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
758 	if (status != IXGBE_SUCCESS)
759 		goto out;
760 
761 	speed &= link_capabilities;
762 
763 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
764 		status = IXGBE_ERR_LINK_SETUP;
765 		goto out;
766 	}
767 
768 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
769 	if (hw->mac.orig_link_settings_stored)
770 		orig_autoc = hw->mac.orig_autoc;
771 	else
772 		orig_autoc = autoc;
773 
774 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
775 	         link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
776 	         link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
777 		/* Set KX4/KX/KR support according to speed requested */
778 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
779 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
780 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
781 				autoc |= IXGBE_AUTOC_KX4_SUPP;
782 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
783 			    (hw->phy.smart_speed_active == FALSE))
784 				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
785 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
786 			autoc |= IXGBE_AUTOC_KX_SUPP;
787 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
788 	         (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
789 	          link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
790 		/* Switch from 1G SFI to 10G SFI if requested */
791 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
792 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
793 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
794 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
795 		}
796 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
797 	         (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
798 		/* Switch from 10G SFI to 1G SFI if requested */
799 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
800 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
801 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
802 			if (autoneg)
803 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
804 			else
805 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
806 		}
807 	}
808 
809 	if (autoc != start_autoc) {
810 
811 		/* Restart link */
812 		autoc |= IXGBE_AUTOC_AN_RESTART;
813 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
814 
815 		/* Only poll for autoneg to complete if specified to do so */
816 		if (autoneg_wait_to_complete) {
817 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
818 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
819 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
820 				links_reg = 0; /*Just in case Autoneg time=0*/
821 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
822 					links_reg =
823 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
824 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
825 						break;
826 					msec_delay(100);
827 				}
828 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
829 					status =
830 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
831 					DEBUGOUT("Autoneg did not complete.\n");
832 				}
833 			}
834 		}
835 
836 		/* Add delay to filter out noise during initial link setup */
837 		msec_delay(50);
838 	}
839 
840 out:
841 	return status;
842 }
843 
844 /**
845  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
846  *  @hw: pointer to hardware structure
847  *  @speed: new link speed
848  *  @autoneg: TRUE if autonegotiation enabled
849  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
850  *
851  *  Restarts link on PHY and MAC based on settings passed in.
852  **/
853 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
854                                                ixgbe_link_speed speed,
855                                                bool autoneg,
856                                                bool autoneg_wait_to_complete)
857 {
858 	s32 status;
859 
860 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
861 
862 	/* Setup the PHY according to input speed */
863 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
864 	                                      autoneg_wait_to_complete);
865 	/* Set up MAC */
866 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
867 
868 	return status;
869 }
870 /**
871  *  ixgbe_reset_hw_82599 - Perform hardware reset
872  *  @hw: pointer to hardware structure
873  *
874  *  Resets the hardware by resetting the transmit and receive units, masks
875  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
876  *  reset.
877  **/
878 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
879 {
880 	s32 status = IXGBE_SUCCESS;
881 	u32 ctrl, ctrl_ext;
882 	u32 i;
883 	u32 autoc;
884 	u32 autoc2;
885 
886 	DEBUGFUNC("ixgbe_reset_hw_82599");
887 
888 	/* Call adapter stop to disable tx/rx and clear interrupts */
889 	hw->mac.ops.stop_adapter(hw);
890 
891 	/* PHY ops must be identified and initialized prior to reset */
892 
893 	/* Identify PHY and related function pointers */
894 	status = hw->phy.ops.init(hw);
895 
896 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
897 		goto reset_hw_out;
898 
899 	/* Setup SFP module if there is one present. */
900 	if (hw->phy.sfp_setup_needed) {
901 		status = hw->mac.ops.setup_sfp(hw);
902 		hw->phy.sfp_setup_needed = FALSE;
903 	}
904 
905 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
906 		goto reset_hw_out;
907 
908 	/* Reset PHY */
909 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
910 		hw->phy.ops.reset(hw);
911 
912 	/*
913 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
914 	 * access and verify no pending requests before reset
915 	 */
916 	status = ixgbe_disable_pcie_master(hw);
917 	if (status != IXGBE_SUCCESS) {
918 		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
919 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
920 	}
921 
922 	/*
923 	 * Issue global reset to the MAC.  This needs to be a SW reset.
924 	 * If link reset is used, it might reset the MAC when mng is using it
925 	 */
926 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
927 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
928 	IXGBE_WRITE_FLUSH(hw);
929 
930 	/* Poll for reset bit to self-clear indicating reset is complete */
931 	for (i = 0; i < 10; i++) {
932 		usec_delay(1);
933 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
934 		if (!(ctrl & IXGBE_CTRL_RST))
935 			break;
936 	}
937 	if (ctrl & IXGBE_CTRL_RST) {
938 		status = IXGBE_ERR_RESET_FAILED;
939 		DEBUGOUT("Reset polling failed to complete.\n");
940 	}
941 	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
942 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
943 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
944 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
945 
946 	msec_delay(50);
947 
948 	/*
949 	 * Store the original AUTOC/AUTOC2 values if they have not been
950 	 * stored off yet.  Otherwise restore the stored original
951 	 * values since the reset operation sets back to defaults.
952 	 */
953 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
954 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
955 	if (hw->mac.orig_link_settings_stored == FALSE) {
956 		hw->mac.orig_autoc = autoc;
957 		hw->mac.orig_autoc2 = autoc2;
958 		hw->mac.orig_link_settings_stored = TRUE;
959 	} else {
960 		if (autoc != hw->mac.orig_autoc)
961 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
962 					IXGBE_AUTOC_AN_RESTART));
963 
964 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
965 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
966 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
967 			autoc2 |= (hw->mac.orig_autoc2 &
968 			           IXGBE_AUTOC2_UPPER_MASK);
969 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
970 		}
971 	}
972 
973 	/* Store the permanent mac address */
974 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
975 
976 	/*
977 	 * Store MAC address from RAR0, clear receive address registers, and
978 	 * clear the multicast table.  Also reset num_rar_entries to 128,
979 	 * since we modify this value when programming the SAN MAC address.
980 	 */
981 	hw->mac.num_rar_entries = 128;
982 	hw->mac.ops.init_rx_addrs(hw);
983 
984 
985 
986 	/* Store the permanent SAN mac address */
987 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
988 
989 	/* Add the SAN MAC address to the RAR only if it's a valid address */
990 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
991 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
992 		                    hw->mac.san_addr, 0, IXGBE_RAH_AV);
993 
994 		/* Reserve the last RAR for the SAN MAC address */
995 		hw->mac.num_rar_entries--;
996 	}
997 
998 	/* Store the alternative WWNN/WWPN prefix */
999 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1000 	                               &hw->mac.wwpn_prefix);
1001 
1002 reset_hw_out:
1003 	return status;
1004 }
1005 
1006 /**
1007  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1008  *  @hw: pointer to hardware structure
1009  **/
1010 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1011 {
1012 	int i;
1013 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1014 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1015 
1016 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1017 
1018 	/*
1019 	 * Before starting reinitialization process,
1020 	 * FDIRCMD.CMD must be zero.
1021 	 */
1022 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1023 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1024 		      IXGBE_FDIRCMD_CMD_MASK))
1025 			break;
1026 		usec_delay(10);
1027 	}
1028 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1029 		DEBUGOUT("Flow Director previous command isn't complete, "
1030 		         "aborting table re-initialization.\n");
1031 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1032 	}
1033 
1034 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1035 	IXGBE_WRITE_FLUSH(hw);
1036 	/*
1037 	 * The 82599 Flow Director init flow cannot simply be restarted; work
1038 	 * around the 82599 silicon errata by performing the following steps
1039 	 * before re-writing the FDIRCTRL control register with the same value.
1040 	 * - write 1 to bit 8 of FDIRCMD register &
1041 	 * - write 0 to bit 8 of FDIRCMD register
1042 	 */
1043 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1044 	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1045 	                 IXGBE_FDIRCMD_CLEARHT));
1046 	IXGBE_WRITE_FLUSH(hw);
1047 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1048 	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1049 	                 ~IXGBE_FDIRCMD_CLEARHT));
1050 	IXGBE_WRITE_FLUSH(hw);
1051 	/*
1052 	 * Clear FDIR Hash register to clear any leftover hashes
1053 	 * waiting to be programmed.
1054 	 */
1055 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1056 	IXGBE_WRITE_FLUSH(hw);
1057 
1058 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1059 	IXGBE_WRITE_FLUSH(hw);
1060 
1061 	/* Poll init-done after we write FDIRCTRL register */
1062 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1063 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1064 		                   IXGBE_FDIRCTRL_INIT_DONE)
1065 			break;
1066 		usec_delay(10);
1067 	}
1068 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1069 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1070 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1071 	}
1072 
1073 	/* Clear FDIR statistics registers (read to clear) */
1074 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1075 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1076 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1077 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1078 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1079 
1080 	return IXGBE_SUCCESS;
1081 }
1082 
1083 /**
1084  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1085  *  @hw: pointer to hardware structure
1086  *  @pballoc: which mode to allocate filters with
1087  **/
1088 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1089 {
1090 	u32 fdirctrl = 0;
1091 	u32 pbsize;
1092 	int i;
1093 
1094 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1095 
1096 	/*
1097 	 * Before enabling Flow Director, the Rx Packet Buffer size
1098 	 * must be reduced.  The new value is the current size minus
1099 	 * flow director memory usage size.
1100 	 */
1101 	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1102 	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1103 	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1104 
1105 	/*
1106 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1107 	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
1108 	 * would be bigger than programmed and filter space would run into
1109 	 * the PB 0 region.
1110 	 */
1111 	for (i = 1; i < 8; i++)
1112 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1113 
1114 	/* Send interrupt when 64 filters are left */
1115 	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1116 
1117 	/* Set the maximum length per hash bucket to 0xA filters */
1118 	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1119 
1120 	switch (pballoc) {
1121 	case IXGBE_FDIR_PBALLOC_64K:
1122 		/* 8k - 1 signature filters */
1123 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1124 		break;
1125 	case IXGBE_FDIR_PBALLOC_128K:
1126 		/* 16k - 1 signature filters */
1127 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1128 		break;
1129 	case IXGBE_FDIR_PBALLOC_256K:
1130 		/* 32k - 1 signature filters */
1131 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1132 		break;
1133 	default:
1134 		/* bad value */
1135 		return IXGBE_ERR_CONFIG;
1136 	}
1137 
1138 	/* Move the flexible bytes to use the ethertype - shift 6 words */
1139 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1140 
1141 
1142 	/* Prime the keys for hashing */
1143 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1144 	                IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1145 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1146 	                IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1147 
1148 	/*
1149 	 * Poll init-done after we write the register.  Estimated times:
1150 	 *      10G: PBALLOC = 11b, timing is 60us
1151 	 *       1G: PBALLOC = 11b, timing is 600us
1152 	 *     100M: PBALLOC = 11b, timing is 6ms
1153 	 *
1154 	 *     Multiply these timings by 4 if under full Rx load
1155 	 *
1156 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1157 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1158 	 * this might not finish in our poll time, but we can live with that
1159 	 * for now.
1160 	 */
1161 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1162 	IXGBE_WRITE_FLUSH(hw);
1163 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1164 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1165 		                   IXGBE_FDIRCTRL_INIT_DONE)
1166 			break;
1167 		msec_delay(1);
1168 	}
1169 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1170 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1171 
1172 	return IXGBE_SUCCESS;
1173 }
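
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically enable signature-mode Flow Director during initialization with
 * one of the PBALLOC sizes handled above, e.g.
 *
 *	if (ixgbe_init_fdir_signature_82599(hw, IXGBE_FDIR_PBALLOC_64K) !=
 *	    IXGBE_SUCCESS)
 *		DEBUGOUT("Flow Director init failed\n");
 */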
1174 
1175 /**
1176  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1177  *  @hw: pointer to hardware structure
1178  *  @pballoc: which mode to allocate filters with
1179  **/
1180 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1181 {
1182 	u32 fdirctrl = 0;
1183 	u32 pbsize;
1184 	int i;
1185 
1186 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1187 
1188 	/*
1189 	 * Before enabling Flow Director, the Rx Packet Buffer size
1190 	 * must be reduced.  The new value is the current size minus
1191 	 * flow director memory usage size.
1192 	 */
1193 
1194 	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1195 	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1196 	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1197 
1198 	/*
1199 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1200 	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
1201 	 * would be bigger than programmed and filter space would run into
1202 	 * the PB 0 region.
1203 	 */
1204 	for (i = 1; i < 8; i++)
1205 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1206 
1207 	/* Send interrupt when 64 filters are left */
1208 	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1209 
1210 	switch (pballoc) {
1211 	case IXGBE_FDIR_PBALLOC_64K:
1212 		/* 2k - 1 perfect filters */
1213 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1214 		break;
1215 	case IXGBE_FDIR_PBALLOC_128K:
1216 		/* 4k - 1 perfect filters */
1217 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1218 		break;
1219 	case IXGBE_FDIR_PBALLOC_256K:
1220 		/* 8k - 1 perfect filters */
1221 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1222 		break;
1223 	default:
1224 		/* bad value */
1225 		return IXGBE_ERR_CONFIG;
1226 	}
1227 
1228 	/* Turn perfect match filtering on */
1229 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1230 	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1231 
1232 	/* Move the flexible bytes to use the ethertype - shift 6 words */
1233 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1234 
1235 	/* Prime the keys for hashing */
1236 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1237 	                IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1238 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1239 	                IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1240 
1241 	/*
1242 	 * Poll init-done after we write the register.  Estimated times:
1243 	 *      10G: PBALLOC = 11b, timing is 60us
1244 	 *       1G: PBALLOC = 11b, timing is 600us
1245 	 *     100M: PBALLOC = 11b, timing is 6ms
1246 	 *
1247 	 *     Multiply these timings by 4 if under full Rx load
1248 	 *
1249 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1250 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1251 	 * this might not finish in our poll time, but we can live with that
1252 	 * for now.
1253 	 */
1254 
1255 	/* Set the maximum length per hash bucket to 0xA filters */
1256 	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1257 
1258 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1259 	IXGBE_WRITE_FLUSH(hw);
1260 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1261 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1262 		                   IXGBE_FDIRCTRL_INIT_DONE)
1263 			break;
1264 		msec_delay(1);
1265 	}
1266 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1267 		DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1268 
1269 	return IXGBE_SUCCESS;
1270 }
1271 
1272 
1273 /**
1274  *  ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1275  *  @stream: input bitstream to compute the hash on
1276  *  @key: 32-bit hash key
1277  **/
1278 u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
1279 {
1280 	/*
1281 	 * The algorithm is as follows:
1282 	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1283 	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1284 	 *    and A[n] x B[n] is bitwise AND between same length strings
1285 	 *
1286 	 *    K[n] is 16 bits, defined as:
1287 	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1288 	 *       for n modulo 32 < 15, K[n] =
1289 	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1290 	 *
1291 	 *    S[n] is 16 bits, defined as:
1292 	 *       for n >= 15, S[n] = S[n:n - 15]
1293 	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1294 	 *
1295 	 *    To simplify for programming, the algorithm is implemented
1296 	 *    in software this way:
1297 	 *
1298 	 *    Key[31:0], Stream[335:0]
1299 	 *
1300 	 *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0]} = Key concatenated 11 times
1301 	 *    int_key[350:0] = tmp_key[351:1]
1302 	 *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1303 	 *
1304 	 *    hash[15:0] = 0;
1305 	 *    for (i = 0; i < 351; i++) {
1306 	 *        if (int_key[i])
1307 	 *            hash ^= int_stream[(i + 15):i];
1308 	 *    }
1309 	 */
1310 
1311 	union {
1312 		u64    fill[6];
1313 		u32    key[11];
1314 		u8     key_stream[44];
1315 	} tmp_key;
1316 
1317 	u8   *stream = (u8 *)atr_input;
1318 	u8   int_key[44];      /* upper-most bit unused */
1319 	u8   hash_str[46];     /* upper-most 2 bits unused */
1320 	u16  hash_result = 0;
1321 	int  i, j, k, h;
1322 
1323 	DEBUGFUNC("ixgbe_atr_compute_hash_82599");
1324 
1325 	/*
1326 	 * Initialize the fill member to prevent warnings
1327 	 * on some compilers
1328 	 */
1329 	 tmp_key.fill[0] = 0;
1330 
1331 	/* First load the temporary key stream */
1332 	for (i = 0; i < 6; i++) {
1333 		u64 fillkey = ((u64)key << 32) | key;
1334 		tmp_key.fill[i] = fillkey;
1335 	}
1336 
1337 	/*
1338 	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
1339 	 * shift and compensate when building the key.
1340 	 */
1341 
1342 	int_key[0] = tmp_key.key_stream[0] >> 1;
1343 	for (i = 1, j = 0; i < 44; i++) {
1344 		unsigned int this_key = tmp_key.key_stream[j] << 7;
1345 		j++;
1346 		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1347 	}
1348 
1349 	/*
1350 	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
1351 	 * unused, so shift and compensate when building the string.
1352 	 */
1353 	hash_str[0] = (stream[40] & 0x7f) >> 1;
1354 	for (i = 1, j = 40; i < 46; i++) {
1355 		unsigned int this_str = stream[j] << 7;
1356 		j++;
1357 		if (j > 41)
1358 			j = 0;
1359 		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1360 	}
1361 
1362 	/*
1363 	 * Now compute the hash.  i is the index into hash_str, j is into our
1364 	 * key stream, k is counting the number of bits, and h iterates within
1365 	 * each byte.
1366 	 */
1367 	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1368 		for (h = 0; h < 8 && k < 351; h++, k++) {
1369 			if (int_key[j] & (1 << h)) {
1370 				/*
1371 				 * Key bit is set, XOR in the current 16-bit
1372 				 * string.  Example of processing:
1373 				 *    h = 0,
1374 				 *      tmp = (hash_str[i - 2] & 0 << 16) |
1375 				 *            (hash_str[i - 1] & 0xff << 8) |
1376 				 *            (hash_str[i] & 0xff >> 0)
1377 				 *      So tmp = hash_str[15 + k:k], since the
1378 				 *      i + 2 clause rolls off the 16-bit value
1379 				 *    h = 7,
1380 				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
1381 				 *            (hash_str[i - 1] & 0xff << 1) |
1382 				 *            (hash_str[i] & 0x80 >> 7)
1383 				 */
1384 				int tmp = (hash_str[i] >> h);
1385 				tmp |= (hash_str[i - 1] << (8 - h));
1386 				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1387 				             << (16 - h);
1388 				hash_result ^= (u16)tmp;
1389 			}
1390 		}
1391 	}
1392 
1393 	return hash_result;
1394 }
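
/*
 * Illustrative sketch (not part of the original file): the hash is normally
 * computed over an input stream assembled with the ixgbe_atr_set_*_82599
 * helpers that follow, e.g.
 *
 *	struct ixgbe_atr_input atr_input;
 *	u16 bucket;
 *
 *	memset(&atr_input, 0, sizeof(atr_input));
 *	ixgbe_atr_set_src_ipv4_82599(&atr_input, src_ip);
 *	ixgbe_atr_set_dst_ipv4_82599(&atr_input, dst_ip);
 *	ixgbe_atr_set_src_port_82599(&atr_input, src_port);
 *	ixgbe_atr_set_dst_port_82599(&atr_input, dst_port);
 *	bucket = ixgbe_atr_compute_hash_82599(&atr_input,
 *	                                      IXGBE_ATR_BUCKET_HASH_KEY);
 */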
1395 
1396 /**
1397  *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1398  *  @input: input stream to modify
1399  *  @vlan: the VLAN id to load
1400  **/
1401 s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1402 {
1403 	DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");
1404 
1405 	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1406 	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1407 
1408 	return IXGBE_SUCCESS;
1409 }
1410 
1411 /**
1412  *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1413  *  @input: input stream to modify
1414  *  @src_addr: the IP address to load
1415  **/
1416 s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1417 {
1418 	DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");
1419 
1420 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1421 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1422 	                                               (src_addr >> 16) & 0xff;
1423 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1424 	                                                (src_addr >> 8) & 0xff;
1425 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1426 
1427 	return IXGBE_SUCCESS;
1428 }
1429 
1430 /**
1431  *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1432  *  @input: input stream to modify
1433  *  @dst_addr: the IP address to load
1434  **/
1435 s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1436 {
1437 	DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");
1438 
1439 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1440 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1441 	                                               (dst_addr >> 16) & 0xff;
1442 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1443 	                                                (dst_addr >> 8) & 0xff;
1444 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1445 
1446 	return IXGBE_SUCCESS;
1447 }
1448 
1449 /**
1450  *  ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1451  *  @input: input stream to modify
1452  *  @src_addr_1: the first 4 bytes of the IP address to load
1453  *  @src_addr_2: the second 4 bytes of the IP address to load
1454  *  @src_addr_3: the third 4 bytes of the IP address to load
1455  *  @src_addr_4: the fourth 4 bytes of the IP address to load
1456  **/
1457 s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1458                                  u32 src_addr_1, u32 src_addr_2,
1459                                  u32 src_addr_3, u32 src_addr_4)
1460 {
1461 	DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");
1462 
1463 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1464 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1465 	                                               (src_addr_4 >> 8) & 0xff;
1466 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1467 	                                              (src_addr_4 >> 16) & 0xff;
1468 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1469 
1470 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1471 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1472 	                                               (src_addr_3 >> 8) & 0xff;
1473 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1474 	                                              (src_addr_3 >> 16) & 0xff;
1475 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1476 
1477 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1478 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1479 	                                               (src_addr_2 >> 8) & 0xff;
1480 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1481 	                                              (src_addr_2 >> 16) & 0xff;
1482 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1483 
1484 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1485 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1486 	                                               (src_addr_1 >> 8) & 0xff;
1487 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1488 	                                              (src_addr_1 >> 16) & 0xff;
1489 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1490 
1491 	return IXGBE_SUCCESS;
1492 }
1493 
1494 /**
1495  *  ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1496  *  @input: input stream to modify
1497  *  @dst_addr_1: the first 4 bytes of the IP address to load
1498  *  @dst_addr_2: the second 4 bytes of the IP address to load
1499  *  @dst_addr_3: the third 4 bytes of the IP address to load
1500  *  @dst_addr_4: the fourth 4 bytes of the IP address to load
1501  **/
1502 s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1503                                  u32 dst_addr_1, u32 dst_addr_2,
1504                                  u32 dst_addr_3, u32 dst_addr_4)
1505 {
1506 	DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");
1507 
1508 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1509 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1510 	                                               (dst_addr_4 >> 8) & 0xff;
1511 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1512 	                                              (dst_addr_4 >> 16) & 0xff;
1513 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1514 
1515 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1516 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1517 	                                               (dst_addr_3 >> 8) & 0xff;
1518 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1519 	                                              (dst_addr_3 >> 16) & 0xff;
1520 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1521 
1522 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1523 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1524 	                                               (dst_addr_2 >> 8) & 0xff;
1525 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1526 	                                              (dst_addr_2 >> 16) & 0xff;
1527 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1528 
1529 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1530 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1531 	                                               (dst_addr_1 >> 8) & 0xff;
1532 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1533 	                                              (dst_addr_1 >> 16) & 0xff;
1534 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1535 
1536 	return IXGBE_SUCCESS;
1537 }
1538 
1539 /**
1540  *  ixgbe_atr_set_src_port_82599 - Sets the source port
1541  *  @input: input stream to modify
1542  *  @src_port: the source port to load
1543  **/
1544 s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1545 {
1546 	DEBUGFUNC("ixgbe_atr_set_src_port_82599");
1547 
1548 	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1549 	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1550 
1551 	return IXGBE_SUCCESS;
1552 }
1553 
1554 /**
1555  *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
1556  *  @input: input stream to modify
1557  *  @dst_port: the destination port to load
1558  **/
1559 s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1560 {
1561 	DEBUGFUNC("ixgbe_atr_set_dst_port_82599");
1562 
1563 	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1564 	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1565 
1566 	return IXGBE_SUCCESS;
1567 }
1568 
1569 /**
1570  *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1571  *  @input: input stream to modify
1572  *  @flex_byte: the flexible bytes to load
1573  **/
1574 s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1575 {
1576 	DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");
1577 
1578 	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1579 	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1580 
1581 	return IXGBE_SUCCESS;
1582 }
1583 
1584 /**
1585  *  ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1586  *  @input: input stream to modify
1587  *  @vm_pool: the Virtual Machine pool to load
1588  **/
1589 s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
1590 {
1591 	DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");
1592 
1593 	input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1594 
1595 	return IXGBE_SUCCESS;
1596 }
1597 
1598 /**
1599  *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1600  *  @input: input stream to modify
1601  *  @l4type: the layer 4 type value to load
1602  **/
1603 s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1604 {
1605 	DEBUGFUNC("ixgbe_atr_set_l4type_82599");
1606 
1607 	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1608 
1609 	return IXGBE_SUCCESS;
1610 }
1611 
1612 /**
1613  *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1614  *  @input: input stream to search
1615  *  @vlan: the VLAN id to load
1616  **/
1617 s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1618 {
1619 	DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");
1620 
1621 	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1622 	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1623 
1624 	return IXGBE_SUCCESS;
1625 }
1626 
1627 /**
1628  *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1629  *  @input: input stream to search
1630  *  @src_addr: the IP address to load
1631  **/
1632 s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
1633 {
1634 	DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");
1635 
1636 	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1637 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1638 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1639 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1640 
1641 	return IXGBE_SUCCESS;
1642 }
1643 
1644 /**
1645  *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
1646  *  @input: input stream to search
1647  *  @dst_addr: the IP address to load
1648  **/
1649 s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
1650 {
1651 	DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599");
1652 
1653 	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1654 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1655 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1656 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1657 
1658 	return IXGBE_SUCCESS;
1659 }
1660 
1661 /**
1662  *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1663  *  @input: input stream to search
1664  *  @src_addr_1: the first 4 bytes of the IP address to load
1665  *  @src_addr_2: the second 4 bytes of the IP address to load
1666  *  @src_addr_3: the third 4 bytes of the IP address to load
1667  *  @src_addr_4: the fourth 4 bytes of the IP address to load
1668  **/
1669 s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1670                                  u32 *src_addr_1, u32 *src_addr_2,
1671                                  u32 *src_addr_3, u32 *src_addr_4)
1672 {
1673 	DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599");
1674 
1675 	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1676 	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1677 	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1678 	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1679 
1680 	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1681 	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1682 	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1683 	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1684 
1685 	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1686 	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1687 	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1688 	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1689 
1690 	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1691 	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1692 	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1693 	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1694 
1695 	return IXGBE_SUCCESS;
1696 }
1697 
1698 /**
1699  *  ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1700  *  @input: input stream to search
1701  *  @dst_addr_1: the first 4 bytes of the IP address to load
1702  *  @dst_addr_2: the second 4 bytes of the IP address to load
1703  *  @dst_addr_3: the third 4 bytes of the IP address to load
1704  *  @dst_addr_4: the fourth 4 bytes of the IP address to load
1705  **/
1706 s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1707                                  u32 *dst_addr_1, u32 *dst_addr_2,
1708                                  u32 *dst_addr_3, u32 *dst_addr_4)
1709 {
1710 	DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599");
1711 
1712 	*dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
1713 	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
1714 	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
1715 	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1716 
1717 	*dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
1718 	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
1719 	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
1720 	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1721 
1722 	*dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
1723 	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1724 	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1725 	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1726 
1727 	*dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1728 	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1729 	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1730 	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1731 
1732 	return IXGBE_SUCCESS;
1733 }
1734 
1735 /**
1736  *  ixgbe_atr_get_src_port_82599 - Gets the source port
1737  *  @input: input stream to search
1738  *  @src_port: the source port to load
1739  *
1740  *  Even though the input is given in big-endian, the FDIRPORT registers
1741  *  expect the ports to be programmed in little-endian.  Hence the need to swap
1742  *  endianness when retrieving the data.  This can be confusing since the
1743  *  internal hash engine expects it to be big-endian.
1744  **/
1745 s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
1746 {
1747 	DEBUGFUNC("ixgbe_atr_get_src_port_82599");
1748 
1749 	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
1750 	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
1751 
1752 	return IXGBE_SUCCESS;
1753 }
1754 
1755 /**
1756  *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
1757  *  @input: input stream to search
1758  *  @dst_port: the destination port to load
1759  *
1760  *  Even though the input is given in big-endian, the FDIRPORT registers
1761  *  expect the ports to be programmed in little-endian.  Hence the need to swap
1762  *  endianness when retrieving the data.  This can be confusing since the
1763  *  internal hash engine expects it to be big-endian.
1764  **/
1765 s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
1766 {
1767 	DEBUGFUNC("ixgbe_atr_get_dst_port_82599");
1768 
1769 	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1770 	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1771 
1772 	return IXGBE_SUCCESS;
1773 }
1774 
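/*
 * Illustrative sketch (not part of the driver, not compiled): the byte swap
 * described above, shown for the destination port.  The value 0x0050 is a
 * hypothetical example.
 */
#if 0
	struct ixgbe_atr_input flow = { 0 };
	u16 reg_port;

	/* The setter stores 0x0050 low byte first in the input stream. */
	ixgbe_atr_set_dst_port_82599(&flow, 0x0050);

	/*
	 * The getter returns the byte-swapped value 0x5000, which is the
	 * form expected when the port is programmed into FDIRPORT.
	 */
	ixgbe_atr_get_dst_port_82599(&flow, &reg_port);
#endif
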
1775 /**
1776  *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
1777  *  @input: input stream to search
1778  *  @flex_byte: the flexible bytes to load
1779  **/
1780 s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
1781 {
1782 	DEBUGFUNC("ixgbe_atr_get_flex_byte_82599");
1783 
1784 	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1785 	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1786 
1787 	return IXGBE_SUCCESS;
1788 }
1789 
1790 /**
1791  *  ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
1792  *  @input: input stream to search
1793  *  @vm_pool: the Virtual Machine pool to load
1794  **/
1795 s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
1796 {
1797 	DEBUGFUNC("ixgbe_atr_get_vm_pool_82599");
1798 
1799 	*vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1800 
1801 	return IXGBE_SUCCESS;
1802 }
1803 
1804 /**
1805  *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1806  *  @input: input stream to search
1807  *  @l4type: the layer 4 type value to load
1808  **/
1809 s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
1810 {
1811 	DEBUGFUNC("ixgbe_atr_get_l4type_82599");
1812 
1813 	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
1814 
1815 	return IXGBE_SUCCESS;
1816 }
1817 
1818 /**
1819  *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1820  *  @hw: pointer to hardware structure
1821  *  @input: input bitstream
1822  *  @queue: queue index to direct traffic to
1823  **/
1824 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1825                                           struct ixgbe_atr_input *input,
1826                                           u8 queue)
1827 {
1828 	u64  fdirhashcmd;
1829 	u64  fdircmd;
1830 	u32  fdirhash;
1831 	u16  bucket_hash, sig_hash;
1832 	u8   l4type;
1833 
1834 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1835 
1836 	bucket_hash = ixgbe_atr_compute_hash_82599(input,
1837 	                                           IXGBE_ATR_BUCKET_HASH_KEY);
1838 
1839 	/* bucket_hash is only 15 bits */
1840 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1841 
1842 	sig_hash = ixgbe_atr_compute_hash_82599(input,
1843 	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);
1844 
1845 	/* Get the l4type in order to program FDIRCMD properly */
1846 	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1847 	ixgbe_atr_get_l4type_82599(input, &l4type);
1848 
1849 	/*
1850 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1851 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1852 	 */
1853 	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1854 
1855 	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1856 	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
1857 
1858 	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1859 	case IXGBE_ATR_L4TYPE_TCP:
1860 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1861 		break;
1862 	case IXGBE_ATR_L4TYPE_UDP:
1863 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1864 		break;
1865 	case IXGBE_ATR_L4TYPE_SCTP:
1866 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1867 		break;
1868 	default:
1869 		DEBUGOUT(" Error on l4type input\n");
1870 		return IXGBE_ERR_CONFIG;
1871 	}
1872 
1873 	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
1874 		fdircmd |= IXGBE_FDIRCMD_IPV6;
1875 
1876 	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
1877 	fdirhashcmd = ((fdircmd << 32) | fdirhash);
1878 
1879 	DEBUGOUT2("Rx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
1880 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1881 
1882 	return IXGBE_SUCCESS;
1883 }
1884 
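/*
 * Illustrative sketch (not part of the driver, not compiled): adding a
 * signature filter for a TCP/IPv4 flow.  "hw" is assumed to point to an
 * initialized struct ixgbe_hw, the src_ip/dst_ip/src_port/dst_port values
 * are hypothetical, and Flow Director is assumed to have already been
 * initialized for signature mode.
 */
#if 0
	struct ixgbe_atr_input flow = { 0 };

	ixgbe_atr_set_src_ipv4_82599(&flow, src_ip);
	ixgbe_atr_set_dst_ipv4_82599(&flow, dst_ip);
	ixgbe_atr_set_src_port_82599(&flow, src_port);
	ixgbe_atr_set_dst_port_82599(&flow, dst_port);
	/* l4type selects FDIRCMD.L4TYPE; OR in IXGBE_ATR_L4TYPE_IPV6_MASK
	 * for an IPv6 flow. */
	ixgbe_atr_set_l4type_82599(&flow, IXGBE_ATR_L4TYPE_TCP);

	/* Steer matching receive traffic to Rx queue 3. */
	ixgbe_fdir_add_signature_filter_82599(hw, &flow, 3);
#endif
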
1885 /**
1886  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1887  *  @hw: pointer to hardware structure
1888  *  @input: input bitstream
 *  @soft_id: software index stored in FDIRHASH to identify the filter
1889  *  @queue: queue index to direct traffic to
1890  *
1891  *  Note that the caller of this function must lock before calling, since the
1892  *  hardware writes must be protected from one another.
1893  **/
1894 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1895                                         struct ixgbe_atr_input *input,
1896                                         u16 soft_id,
1897                                         u8 queue)
1898 {
1899 	u32 fdircmd = 0;
1900 	u32 fdirhash;
1901 	u32 src_ipv4, dst_ipv4;
1902 	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
1903 	u16 src_port, dst_port, vlan_id, flex_bytes;
1904 	u16 bucket_hash;
1905 	u8  l4type;
1906 
1907 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1908 
1909 	/* Get our input values */
1910 	ixgbe_atr_get_l4type_82599(input, &l4type);
1911 
1912 	/*
1913 	 * Check l4type formatting, and bail out before we touch the hardware
1914 	 * if there's a configuration issue
1915 	 */
1916 	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1917 	case IXGBE_ATR_L4TYPE_TCP:
1918 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1919 		break;
1920 	case IXGBE_ATR_L4TYPE_UDP:
1921 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1922 		break;
1923 	case IXGBE_ATR_L4TYPE_SCTP:
1924 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1925 		break;
1926 	default:
1927 		DEBUGOUT(" Error on l4type input\n");
1928 		return IXGBE_ERR_CONFIG;
1929 	}
1930 
1931 	bucket_hash = ixgbe_atr_compute_hash_82599(input,
1932 	                                           IXGBE_ATR_BUCKET_HASH_KEY);
1933 
1934 	/* bucket_hash is only 15 bits */
1935 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1936 
1937 	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
1938 	ixgbe_atr_get_src_port_82599(input, &src_port);
1939 	ixgbe_atr_get_dst_port_82599(input, &dst_port);
1940 	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
1941 
1942 	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1943 
1944 	/* Now figure out if we're IPv4 or IPv6 */
1945 	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
1946 		/* IPv6 */
1947 		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
1948 	                                     &src_ipv6_3, &src_ipv6_4);
1949 
1950 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
1951 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
1952 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
1953 		/* The last 4 bytes share the same register as IPv4 */
1954 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
1955 
1956 		fdircmd |= IXGBE_FDIRCMD_IPV6;
1957 		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
1958 	} else {
1959 		/* IPv4 */
1960 		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
1961 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
1962 
1963 	}
1964 
1965 	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
1966 	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
1967 
1968 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
1969 	                            (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
1970 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
1971 	                       (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1972 
1973 	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
1974 	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
1975 	fdircmd |= IXGBE_FDIRCMD_LAST;
1976 	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
1977 	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1978 
1979 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1980 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1981 
1982 	return IXGBE_SUCCESS;
1983 }
1984 
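/*
 * Illustrative sketch (not part of the driver, not compiled): adding a
 * perfect filter.  "hw", the flow fields and "rx_queue" are hypothetical,
 * and the caller is assumed to hold whatever lock it uses to serialize
 * these register writes (see the function comment above).
 */
#if 0
	struct ixgbe_atr_input flow = { 0 };	/* populated as in the
						 * signature-filter sketch */
	u16 soft_id = 1;	/* software handle stored in FDIRHASH */

	ixgbe_atr_set_l4type_82599(&flow, IXGBE_ATR_L4TYPE_UDP);
	ixgbe_fdir_add_perfect_filter_82599(hw, &flow, soft_id, rx_queue);
#endif
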
1985 /**
1986  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1987  *  @hw: pointer to hardware structure
1988  *  @reg: analog register to read
1989  *  @val: read value
1990  *
1991  *  Performs read operation to Omer analog register specified.
1992  **/
1993 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1994 {
1995 	u32  core_ctl;
1996 
1997 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1998 
1999 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2000 	                (reg << 8));
2001 	IXGBE_WRITE_FLUSH(hw);
2002 	usec_delay(10);
2003 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2004 	*val = (u8)core_ctl;
2005 
2006 	return IXGBE_SUCCESS;
2007 }
2008 
2009 /**
2010  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2011  *  @hw: pointer to hardware structure
2012  *  @reg: analog register to write
2013  *  @val: value to write
2014  *
2015  *  Performs write operation to Omer analog register specified.
2016  **/
2017 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2018 {
2019 	u32  core_ctl;
2020 
2021 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2022 
2023 	core_ctl = (reg << 8) | val;
2024 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2025 	IXGBE_WRITE_FLUSH(hw);
2026 	usec_delay(10);
2027 
2028 	return IXGBE_SUCCESS;
2029 }
2030 
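/*
 * Illustrative sketch (not part of the driver, not compiled): a
 * read-modify-write of an analog register using the two helpers above.
 * "analog_reg" and the bit being set are hypothetical placeholders.
 */
#if 0
	u32 analog_reg = 0x10;	/* hypothetical analog register offset */
	u8  analog_val;

	ixgbe_read_analog_reg8_82599(hw, analog_reg, &analog_val);
	analog_val |= 0x01;	/* set an example bit */
	ixgbe_write_analog_reg8_82599(hw, analog_reg, analog_val);
#endif
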
2031 /**
2032  *  ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
2033  *  @hw: pointer to hardware structure
2034  *
2035  *  Starts the hardware using the generic start_hw function and then performs
2036  *  the revision-specific operations: clearing the rate limiter registers,
2037  *  disabling relaxed ordering and verifying the firmware version.
2038  **/
2039 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
2040 {
2041 	u32 i;
2042 	u32 regval;
2043 	s32 ret_val = IXGBE_SUCCESS;
2044 
2045 	DEBUGFUNC("ixgbe_start_hw_rev_1_82599");
2046 
2047 	ret_val = ixgbe_start_hw_generic(hw);
2048 
2049 	/* Clear the rate limiters */
2050 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
2051 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
2052 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
2053 	}
2054 	IXGBE_WRITE_FLUSH(hw);
2055 
2056 	/* Disable relaxed ordering */
2057 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
2058 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2059 		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2060 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
2061 	}
2062 
2063 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
2064 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
2065 		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
2066 			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
2067 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
2068 	}
2069 
2070 	/* We need to run link autotry after the driver loads */
2071 	hw->mac.autotry_restart = TRUE;
2072 
2073 	if (ret_val == IXGBE_SUCCESS)
2074 		ret_val = ixgbe_verify_fw_version_82599(hw);
2075 	return ret_val;
2076 }
2077 
2078 /**
2079  *  ixgbe_identify_phy_82599 - Get physical layer module
2080  *  @hw: pointer to hardware structure
2081  *
2082  *  Determines the physical layer module found on the current adapter.
2083  *  If PHY already detected, maintains current PHY type in hw struct,
2084  *  otherwise executes the PHY detection routine.
2085  **/
2086 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2087 {
2088 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2089 
2090 	DEBUGFUNC("ixgbe_identify_phy_82599");
2091 
2092 	/* Detect PHY if not unknown - returns success if already detected. */
2093 	status = ixgbe_identify_phy_generic(hw);
2094 	if (status != IXGBE_SUCCESS)
2095 		status = ixgbe_identify_sfp_module_generic(hw);
2096 	/* Set PHY type none if no PHY detected */
2097 	if (hw->phy.type == ixgbe_phy_unknown) {
2098 		hw->phy.type = ixgbe_phy_none;
2099 		status = IXGBE_SUCCESS;
2100 	}
2101 
2102 	/* Return error if SFP module has been detected but is not supported */
2103 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2104 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2105 
2106 	return status;
2107 }
2108 
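/*
 * Illustrative sketch (not part of the driver, not compiled): the outcomes a
 * caller typically checks after the routine above.
 */
#if 0
	s32 err = ixgbe_identify_phy_82599(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		DEBUGOUT("unsupported SFP+ module detected\n");
	else if (err == IXGBE_SUCCESS && hw->phy.type == ixgbe_phy_none)
		DEBUGOUT("no PHY detected\n");
#endif
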
2109 /**
2110  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2111  *  @hw: pointer to hardware structure
2112  *
2113  *  Determines physical layer capabilities of the current configuration.
2114  **/
2115 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2116 {
2117 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2118 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2119 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2120 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2121 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2122 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2123 	u16 ext_ability = 0;
2124 	u8 comp_codes_10g = 0;
2125 
2126 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2127 
2128 	hw->phy.ops.identify(hw);
2129 
2130 	if (hw->phy.type == ixgbe_phy_tn ||
2131 	    hw->phy.type == ixgbe_phy_aq ||
2132 	    hw->phy.type == ixgbe_phy_cu_unknown) {
2133 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2134 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2135 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2136 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2137 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2138 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2139 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2140 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2141 		goto out;
2142 	}
2143 
2144 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2145 	case IXGBE_AUTOC_LMS_1G_AN:
2146 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2147 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2148 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2149 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2150 			goto out;
2151 		} else
2152 			/* SFI mode so read SFP module */
2153 			goto sfp_check;
2154 		break;
2155 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2156 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2157 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2158 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2159 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2160 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2161 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2162 		goto out;
2163 		break;
2164 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2165 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2166 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2167 			goto out;
2168 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2169 			goto sfp_check;
2170 		break;
2171 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2172 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2173 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2174 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2175 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2176 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2177 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2178 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2179 		goto out;
2180 		break;
2181 	default:
2182 		goto out;
2183 		break;
2184 	}
2185 
2186 sfp_check:
2187 	/* SFP check must be done last since DA modules are sometimes used to
2188 	 * test KR mode - we need to identify KR mode correctly before checking
2189 	 * the SFP module.  Call identify_sfp because the module may have changed. */
2190 	hw->phy.ops.identify_sfp(hw);
2191 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2192 		goto out;
2193 
2194 	switch (hw->phy.type) {
2195 	case ixgbe_phy_tw_tyco:
2196 	case ixgbe_phy_tw_unknown:
2197 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2198 		break;
2199 	case ixgbe_phy_sfp_avago:
2200 	case ixgbe_phy_sfp_ftl:
2201 	case ixgbe_phy_sfp_intel:
2202 	case ixgbe_phy_sfp_unknown:
2203 		hw->phy.ops.read_i2c_eeprom(hw,
2204 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2205 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2206 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2207 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2208 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2209 		break;
2210 	default:
2211 		break;
2212 	}
2213 
2214 out:
2215 	return physical_layer;
2216 }
2217 
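/*
 * Illustrative sketch (not part of the driver, not compiled): the return
 * value is a bitmask, so a caller tests individual IXGBE_PHYSICAL_LAYER_*
 * bits rather than comparing for equality.
 */
#if 0
	u32 phys = ixgbe_get_supported_physical_layer_82599(hw);

	if (phys & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
		DEBUGOUT("10GBASE-SR supported\n");
	if (phys & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		DEBUGOUT("1000BASE-KX supported\n");
#endif
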
2218 /**
2219  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2220  *  @hw: pointer to hardware structure
2221  *  @regval: register value to write to RXCTRL
2222  *
2223  *  Enables the Rx DMA unit for 82599
2224  **/
2225 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2226 {
2227 #define IXGBE_MAX_SECRX_POLL 30
2228 	int i;
2229 	int secrxreg;
2230 
2231 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2232 
2233 	/*
2234 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2235 	 * If traffic is incoming before we enable the Rx unit, it could hang
2236 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2237 	 * completely disabled prior to enabling the Rx unit.
2238 	 */
2239 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2240 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2241 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2242 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2243 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2244 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2245 			break;
2246 		else
2247 			/* Use interrupt-safe sleep just in case */
2248 			usec_delay(10);
2249 	}
2250 
2251 	/* For informational purposes only */
2252 	if (i >= IXGBE_MAX_SECRX_POLL)
2253 		DEBUGOUT("Rx unit being enabled before security "
2254 		         "path fully disabled.  Continuing with init.\n");
2255 
2256 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2257 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2258 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2259 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2260 	IXGBE_WRITE_FLUSH(hw);
2261 
2262 	return IXGBE_SUCCESS;
2263 }
2264 
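/*
 * Illustrative sketch (not part of the driver, not compiled): a caller
 * normally reads RXCTRL, sets RXEN, and passes the desired value here so
 * the security-block workaround above is applied while enabling Rx.
 */
#if 0
	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);

	ixgbe_enable_rx_dma_82599(hw, rxctrl | IXGBE_RXCTRL_RXEN);
#endif
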
2265 /**
2266  *  ixgbe_get_device_caps_82599 - Get additional device capabilities
2267  *  @hw: pointer to hardware structure
2268  *  @device_caps: the EEPROM word with the extra device capabilities
2269  *
2270  *  This function will read the EEPROM location for the device capabilities,
2271  *  and return the word through device_caps.
2272  **/
2273 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2274 {
2275 	DEBUGFUNC("ixgbe_get_device_caps_82599");
2276 
2277 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
2278 
2279 	return IXGBE_SUCCESS;
2280 }
2281 
2282 /**
2283  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
2284  *  @hw: pointer to hardware structure
2285  *
2286  *  Verifies that the installed firmware version is 0.6 or higher
2287  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2288  *
2289  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2290  *  if the FW version is not supported.
2291  **/
2292 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2293 {
2294 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2295 	u16 fw_offset, fw_ptp_cfg_offset;
2296 	u16 fw_version = 0;
2297 
2298 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2299 
2300 	/* firmware check is only necessary for SFI devices */
2301 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2302 		status = IXGBE_SUCCESS;
2303 		goto fw_version_out;
2304 	}
2305 
2306 	/* get the offset to the Firmware Module block */
2307 	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2308 
2309 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2310 		goto fw_version_out;
2311 
2312 	/* get the offset to the Pass Through Patch Configuration block */
2313 	hw->eeprom.ops.read(hw, (fw_offset +
2314 	                         IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2315 	                         &fw_ptp_cfg_offset);
2316 
2317 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2318 		goto fw_version_out;
2319 
2320 	/* get the firmware version */
2321 	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2322 	                         IXGBE_FW_PATCH_VERSION_4),
2323 	                         &fw_version);
2324 
2325 	if (fw_version > 0x5)
2326 		status = IXGBE_SUCCESS;
2327 
2328 fw_version_out:
2329 	return status;
2330 }
2331