xref: /freebsd/sys/dev/ixgbe/ixgbe_82599.c (revision a3cf0ef5a295c885c895fabfd56470c0d1db322d)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2010, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
39 
40 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
41 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
42                                       ixgbe_link_speed *speed,
43                                       bool *autoneg);
44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
45 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
46                                      ixgbe_link_speed speed, bool autoneg,
47                                      bool autoneg_wait_to_complete);
48 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
49 				     ixgbe_link_speed speed, bool autoneg,
50 				     bool autoneg_wait_to_complete);
51 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
52 				bool autoneg_wait_to_complete);
53 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
54                                      ixgbe_link_speed speed,
55                                      bool autoneg,
56                                      bool autoneg_wait_to_complete);
57 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
58                                                ixgbe_link_speed speed,
59                                                bool autoneg,
60                                                bool autoneg_wait_to_complete);
61 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
62 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
63 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
64 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
65 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
66 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
67 void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw);
68 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
69 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
70 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
71 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
72 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
73 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
74 
75 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
76 {
77 	struct ixgbe_mac_info *mac = &hw->mac;
78 
79 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
80 
81 	if (hw->phy.multispeed_fiber) {
82 		/* Set up dual speed SFP+ support */
83 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
84 	} else {
85 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
86 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
87 		      hw->phy.smart_speed == ixgbe_smart_speed_on))
88 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
89 		else
90 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
91 	}
92 }
93 
/**
 *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 *  Returns IXGBE_SUCCESS, or propagates IXGBE_ERR_SFP_NOT_SUPPORTED
 *  from the identify step (other identify errors fall through and the
 *  remaining setup still runs).
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* A recognized SFP module needs no PHY reset */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
		                  &ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on phy type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHY: use its dedicated link and firmware hooks */
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_aq:
		/* AQ PHY: only the firmware-version hook differs */
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_generic;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
146 
/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  For a recognized SFP module, streams the EEPROM-resident PHY init
 *  sequence into the CORECTL register (under the MAC_CSR software/
 *  firmware semaphore) and then restarts autonegotiation to restart
 *  the DSP.  Unknown modules are left untouched and IXGBE_SUCCESS is
 *  returned.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		/* Known module: no PHY reset needed */
		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
		                                              &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Write each init word to CORECTL until 0xffff terminator */
		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}
		/* Now restart DSP by setting Restart_AN */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
		    (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));

		/* Release the semaphore */
		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);
	}

setup_sfp_out:
	return ret_val;
}
190 
/**
 *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82599.
 *  Does not touch the hardware.
 *
 *  Returns the result of ixgbe_init_ops_generic().
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/*
	 * NOTE(review): the result of ixgbe_init_phy_ops_generic is
	 * overwritten by the second assignment, so a failure there is
	 * silently dropped -- confirm this is intentional.
	 */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY: 82599-specific identify/init override the generic hooks */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
	                            &ixgbe_get_supported_physical_layer_82599;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link            = &ixgbe_check_mac_link_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 table/queue sizes (fixed by the silicon) */
	mac->mcft_size        = 128;
	mac->vft_size         = 128;
	mac->num_rar_entries  = 128;
	mac->max_tx_queues    = 128;
	mac->max_rx_queues    = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);


	return ret_val;
}
253 
/**
 *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @negotiation: TRUE when autoneg or autotry is enabled
 *
 *  Determines the link capabilities by reading the AUTOC register.
 *  Returns IXGBE_ERR_LINK_SETUP for an unrecognized link-mode field.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed,
                                      bool *negotiation)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");



	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	/* Decode the link-mode-select field into speed + negotiation */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		/* KR and KX4 both contribute 10G; KX contributes 1G */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		/* SGMII adds a 100M baseline to the KX4/KX/KR speeds */
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*negotiation = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break; /* unreachable; kept for switch symmetry */
	}

	/* Multispeed fiber always advertises 10G + 1G with autotry */
	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
		          IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
	}

out:
	return status;
}
347 
348 /**
349  *  ixgbe_get_media_type_82599 - Get media type
350  *  @hw: pointer to hardware structure
351  *
352  *  Returns the media type (fiber, copper, backplane)
353  **/
354 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
355 {
356 	enum ixgbe_media_type media_type;
357 
358 	DEBUGFUNC("ixgbe_get_media_type_82599");
359 
360 	/* Detect if there is a copper PHY attached. */
361 	if (hw->phy.type == ixgbe_phy_cu_unknown ||
362 	    hw->phy.type == ixgbe_phy_tn ||
363 	    hw->phy.type == ixgbe_phy_aq) {
364 		media_type = ixgbe_media_type_copper;
365 		goto out;
366 	}
367 
368 	switch (hw->device_id) {
369 	case IXGBE_DEV_ID_82599_KX4:
370 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
371 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
372 	case IXGBE_DEV_ID_82599_XAUI_LOM:
373 		/* Default device ID is mezzanine card KX/KX4 */
374 		media_type = ixgbe_media_type_backplane;
375 		break;
376 	case IXGBE_DEV_ID_82599_SFP:
377 		media_type = ixgbe_media_type_fiber;
378 		break;
379 	case IXGBE_DEV_ID_82599_CX4:
380 		media_type = ixgbe_media_type_cx4;
381 		break;
382 	default:
383 		media_type = ixgbe_media_type_unknown;
384 		break;
385 	}
386 out:
387 	return media_type;
388 }
389 
/**
 *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: TRUE to poll for KX/KX4/KR autoneg completion
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 *
 *  Returns IXGBE_ERR_AUTONEG_NOT_COMPLETE if polling was requested and
 *  autoneg did not finish within IXGBE_AUTO_NEG_TIME * 100 ms.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
                               bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		/* Polling only applies to the KX/KX4/KR link modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
		    || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return status;
}
440 
/**
 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 *  Tries 10G first, then 1G, flapping the tx laser between attempts to
 *  signal the link partner to restart autotry; falls back to the
 *  highest attempted speed via a single recursive call if neither
 *  attempt gets link.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed, bool autoneg,
                                     bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = FALSE;
	bool negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * When the driver changes the link speeds that it can support,
	 * it sets autotry_restart to TRUE to indicate that we need to
	 * initiate a new autotry session with the link partner.  To do
	 * so, we set the speed then disable and re-enable the tx laser, to
	 * alert the link partner that it also needs to restart autotry on its
	 * end.  This is consistent with TRUE clause 37 autoneg, which also
	 * involves a loss of signal.
	 */

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 high selects 10G) */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(
			hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
			autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		if (hw->mac.autotry_restart) {
			/* Disable tx laser; allow 100us to go dark per spec */
			esdp_reg |= IXGBE_ESDP_SDP3;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			usec_delay(100);

			/* Enable tx laser; allow 2ms to light up per spec */
			esdp_reg &= ~IXGBE_ESDP_SDP3;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			msec_delay(2);

			hw->mac.autotry_restart = FALSE;
		}

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
			                          &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 low selects 1G) */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(
			hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
			autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		if (hw->mac.autotry_restart) {
			/* Disable tx laser; allow 100us to go dark per spec */
			esdp_reg |= IXGBE_ESDP_SDP3;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			usec_delay(100);

			/* Enable tx laser; allow 2ms to light up per spec */
			esdp_reg &= ~IXGBE_ESDP_SDP3;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			msec_delay(2);

			hw->mac.autotry_restart = FALSE;
		}

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
		        highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
621 
/**
 *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Implements the Intel SmartSpeed algorithm: first try the full
 *  advertisement; if link does not come up and KR was advertised
 *  alongside KX4/KX, retry with KR disabled; finally restore the full
 *  advertisement if that also fails.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	 /* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);

out:
	return status;
}
731 
732 /**
733  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
734  *  @hw: pointer to hardware structure
735  *  @speed: new link speed
736  *  @autoneg: TRUE if autonegotiation enabled
737  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
738  *
739  *  Set the link speed in the AUTOC register and restarts link.
740  **/
741 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
742                                      ixgbe_link_speed speed, bool autoneg,
743                                      bool autoneg_wait_to_complete)
744 {
745 	s32 status = IXGBE_SUCCESS;
746 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
747 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
748 	u32 start_autoc = autoc;
749 	u32 orig_autoc = 0;
750 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
751 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
752 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
753 	u32 links_reg;
754 	u32 i;
755 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
756 
757 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
758 
759 	/* Check to see if speed passed in is supported. */
760 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
761 	if (status != IXGBE_SUCCESS)
762 		goto out;
763 
764 	speed &= link_capabilities;
765 
766 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
767 		status = IXGBE_ERR_LINK_SETUP;
768 		goto out;
769 	}
770 
771 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
772 	if (hw->mac.orig_link_settings_stored)
773 		orig_autoc = hw->mac.orig_autoc;
774 	else
775 		orig_autoc = autoc;
776 
777 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
778 	         link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
779 	         link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
780 		/* Set KX4/KX/KR support according to speed requested */
781 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
782 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
783 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
784 				autoc |= IXGBE_AUTOC_KX4_SUPP;
785 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
786 			    (hw->phy.smart_speed_active == FALSE))
787 				autoc |= IXGBE_AUTOC_KR_SUPP;
788 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
789 			autoc |= IXGBE_AUTOC_KX_SUPP;
790 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
791 	         (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
792 	          link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
793 		/* Switch from 1G SFI to 10G SFI if requested */
794 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
795 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
796 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
797 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
798 		}
799 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
800 	         (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
801 		/* Switch from 10G SFI to 1G SFI if requested */
802 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
803 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
804 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
805 			if (autoneg)
806 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
807 			else
808 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
809 		}
810 	}
811 
812 	if (autoc != start_autoc) {
813 
814 		/* Restart link */
815 		autoc |= IXGBE_AUTOC_AN_RESTART;
816 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
817 
818 		/* Only poll for autoneg to complete if specified to do so */
819 		if (autoneg_wait_to_complete) {
820 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
821 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
822 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
823 				links_reg = 0; /*Just in case Autoneg time=0*/
824 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
825 					links_reg =
826 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
827 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
828 						break;
829 					msec_delay(100);
830 				}
831 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
832 					status =
833 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
834 					DEBUGOUT("Autoneg did not complete.\n");
835 				}
836 			}
837 		}
838 
839 		/* Add delay to filter out noises during initial link setup */
840 		msec_delay(50);
841 	}
842 
843 out:
844 	return status;
845 }
846 
/**
 *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Restarts link on PHY and MAC based on settings passed in.
 *  Returns the PHY setup status; the MAC link-start result is ignored.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                               ixgbe_link_speed speed,
                                               bool autoneg,
                                               bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
	                                      autoneg_wait_to_complete);
	/* Set up MAC (return value intentionally not checked here) */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}
873 /**
874  *  ixgbe_reset_hw_82599 - Perform hardware reset
875  *  @hw: pointer to hardware structure
876  *
877  *  Resets the hardware by resetting the transmit and receive units, masks
878  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
879  *  reset.
880  **/
881 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
882 {
883 	s32 status = IXGBE_SUCCESS;
884 	u32 ctrl;
885 	u32 i;
886 	u32 autoc;
887 	u32 autoc2;
888 
889 	DEBUGFUNC("ixgbe_reset_hw_82599");
890 
891 	/* Call adapter stop to disable tx/rx and clear interrupts */
892 	hw->mac.ops.stop_adapter(hw);
893 
894 	/* PHY ops must be identified and initialized prior to reset */
895 
896 	/* Identify PHY and related function pointers */
897 	status = hw->phy.ops.init(hw);
898 
899 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
900 		goto reset_hw_out;
901 
902 	/* Setup SFP module if there is one present. */
903 	if (hw->phy.sfp_setup_needed) {
904 		status = hw->mac.ops.setup_sfp(hw);
905 		hw->phy.sfp_setup_needed = FALSE;
906 	}
907 
908 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
909 		goto reset_hw_out;
910 
911 	/* Reset PHY */
912 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
913 		hw->phy.ops.reset(hw);
914 
915 	/*
916 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
917 	 * access and verify no pending requests before reset
918 	 */
919 	ixgbe_disable_pcie_master(hw);
920 
921 mac_reset_top:
922 	/*
923 	 * Issue global reset to the MAC.  This needs to be a SW reset.
924 	 * If link reset is used, it might reset the MAC when mng is using it
925 	 */
926 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
927 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
928 	IXGBE_WRITE_FLUSH(hw);
929 
930 	/* Poll for reset bit to self-clear indicating reset is complete */
931 	for (i = 0; i < 10; i++) {
932 		usec_delay(1);
933 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
934 		if (!(ctrl & IXGBE_CTRL_RST))
935 			break;
936 	}
937 	if (ctrl & IXGBE_CTRL_RST) {
938 		status = IXGBE_ERR_RESET_FAILED;
939 		DEBUGOUT("Reset polling failed to complete.\n");
940 	}
941 
942 	/*
943 	 * Double resets are required for recovery from certain error
944 	 * conditions.  Between resets, it is necessary to stall to allow time
945 	 * for any pending HW events to complete.  We use 1usec since that is
946 	 * what is needed for ixgbe_disable_pcie_master().  The second reset
947 	 * then clears out any effects of those events.
948 	 */
949 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
950 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
951 		usec_delay(1);
952 		goto mac_reset_top;
953 	}
954 
955 	msec_delay(50);
956 
957 	/*
958 	 * Store the original AUTOC/AUTOC2 values if they have not been
959 	 * stored off yet.  Otherwise restore the stored original
960 	 * values since the reset operation sets back to defaults.
961 	 */
962 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
963 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
964 	if (hw->mac.orig_link_settings_stored == FALSE) {
965 		hw->mac.orig_autoc = autoc;
966 		hw->mac.orig_autoc2 = autoc2;
967 		hw->mac.orig_link_settings_stored = TRUE;
968 	} else {
969 		if (autoc != hw->mac.orig_autoc)
970 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
971 					IXGBE_AUTOC_AN_RESTART));
972 
973 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
974 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
975 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
976 			autoc2 |= (hw->mac.orig_autoc2 &
977 			           IXGBE_AUTOC2_UPPER_MASK);
978 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
979 		}
980 	}
981 
982        /* Store the permanent mac address */
983 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
984 
985 	/*
986 	 * Store MAC address from RAR0, clear receive address registers, and
987 	 * clear the multicast table.  Also reset num_rar_entries to 128,
988 	 * since we modify this value when programming the SAN MAC address.
989 	 */
990 	hw->mac.num_rar_entries = 128;
991 	hw->mac.ops.init_rx_addrs(hw);
992 
993 	/* Store the permanent SAN mac address */
994 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
995 
996 	/* Add the SAN MAC address to the RAR only if it's a valid address */
997 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
998 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
999 		                    hw->mac.san_addr, 0, IXGBE_RAH_AV);
1000 
1001 		/* Reserve the last RAR for the SAN MAC address */
1002 		hw->mac.num_rar_entries--;
1003        }
1004 
1005 	/* Store the alternative WWNN/WWPN prefix */
1006 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1007 	                               &hw->mac.wwpn_prefix);
1008 
1009 reset_hw_out:
1010 	return status;
1011 }
1012 
1013 /**
1014  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1015  *  @hw: pointer to hardware structure
1016  **/
1017 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1018 {
1019 	int i;
1020 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1021 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1022 
1023 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1024 
1025 	/*
1026 	 * Before starting reinitialization process,
1027 	 * FDIRCMD.CMD must be zero.
1028 	 */
1029 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1030 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1031 		      IXGBE_FDIRCMD_CMD_MASK))
1032 			break;
1033 		usec_delay(10);
1034 	}
1035 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1036 		DEBUGOUT("Flow Director previous command isn't complete, "
1037 		         "aborting table re-initialization. \n");
1038 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1039 	}
1040 
1041 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1042 	IXGBE_WRITE_FLUSH(hw);
1043 	/*
1044 	 * 82599 adapters flow director init flow cannot be restarted,
1045 	 * Workaround 82599 silicon errata by performing the following steps
1046 	 * before re-writing the FDIRCTRL control register with the same value.
1047 	 * - write 1 to bit 8 of FDIRCMD register &
1048 	 * - write 0 to bit 8 of FDIRCMD register
1049 	 */
1050 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1051 	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1052 	                 IXGBE_FDIRCMD_CLEARHT));
1053 	IXGBE_WRITE_FLUSH(hw);
1054 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1055 	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1056 	                 ~IXGBE_FDIRCMD_CLEARHT));
1057 	IXGBE_WRITE_FLUSH(hw);
1058 	/*
1059 	 * Clear FDIR Hash register to clear any leftover hashes
1060 	 * waiting to be programmed.
1061 	 */
1062 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1063 	IXGBE_WRITE_FLUSH(hw);
1064 
1065 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1066 	IXGBE_WRITE_FLUSH(hw);
1067 
1068 	/* Poll init-done after we write FDIRCTRL register */
1069 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1070 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1071 		                   IXGBE_FDIRCTRL_INIT_DONE)
1072 			break;
1073 		usec_delay(10);
1074 	}
1075 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1076 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1077 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1078 	}
1079 
1080 	/* Clear FDIR statistics registers (read to clear) */
1081 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1082 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1083 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1084 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1085 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1086 
1087 	return IXGBE_SUCCESS;
1088 }
1089 
1090 /**
1091  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1092  *  @hw: pointer to hardware structure
1093  *  @pballoc: which mode to allocate filters with
1094  **/
1095 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1096 {
1097 	u32 fdirctrl = 0;
1098 	u32 pbsize;
1099 	int i;
1100 
1101 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1102 
1103 	/*
1104 	 * Before enabling Flow Director, the Rx Packet Buffer size
1105 	 * must be reduced.  The new value is the current size minus
1106 	 * flow director memory usage size.
1107 	 */
1108 	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1109 	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1110 	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1111 
1112 	/*
1113 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1114 	 * intialized to zero for non DCB mode otherwise actual total RX PB
1115 	 * would be bigger than programmed and filter space would run into
1116 	 * the PB 0 region.
1117 	 */
1118 	for (i = 1; i < 8; i++)
1119 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1120 
1121 	/* Send interrupt when 64 filters are left */
1122 	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1123 
1124 	/* Set the maximum length per hash bucket to 0xA filters */
1125 	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1126 
1127 	switch (pballoc) {
1128 	case IXGBE_FDIR_PBALLOC_64K:
1129 		/* 8k - 1 signature filters */
1130 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1131 		break;
1132 	case IXGBE_FDIR_PBALLOC_128K:
1133 		/* 16k - 1 signature filters */
1134 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1135 		break;
1136 	case IXGBE_FDIR_PBALLOC_256K:
1137 		/* 32k - 1 signature filters */
1138 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1139 		break;
1140 	default:
1141 		/* bad value */
1142 		return IXGBE_ERR_CONFIG;
1143 	};
1144 
1145 	/* Move the flexible bytes to use the ethertype - shift 6 words */
1146 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1147 
1148 
1149 	/* Prime the keys for hashing */
1150 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1151 	                IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1152 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1153 	                IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1154 
1155 	/*
1156 	 * Poll init-done after we write the register.  Estimated times:
1157 	 *      10G: PBALLOC = 11b, timing is 60us
1158 	 *       1G: PBALLOC = 11b, timing is 600us
1159 	 *     100M: PBALLOC = 11b, timing is 6ms
1160 	 *
1161 	 *     Multiple these timings by 4 if under full Rx load
1162 	 *
1163 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1164 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1165 	 * this might not finish in our poll time, but we can live with that
1166 	 * for now.
1167 	 */
1168 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1169 	IXGBE_WRITE_FLUSH(hw);
1170 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1171 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1172 		                   IXGBE_FDIRCTRL_INIT_DONE)
1173 			break;
1174 		msec_delay(1);
1175 	}
1176 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1177 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1178 
1179 	return IXGBE_SUCCESS;
1180 }
1181 
1182 /**
1183  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1184  *  @hw: pointer to hardware structure
1185  *  @pballoc: which mode to allocate filters with
1186  **/
1187 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1188 {
1189 	u32 fdirctrl = 0;
1190 	u32 pbsize;
1191 	int i;
1192 
1193 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1194 
1195 	/*
1196 	 * Before enabling Flow Director, the Rx Packet Buffer size
1197 	 * must be reduced.  The new value is the current size minus
1198 	 * flow director memory usage size.
1199 	 */
1200 
1201 	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1202 	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1203 	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1204 
1205 	/*
1206 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1207 	 * intialized to zero for non DCB mode otherwise actual total RX PB
1208 	 * would be bigger than programmed and filter space would run into
1209 	 * the PB 0 region.
1210 	 */
1211 	for (i = 1; i < 8; i++)
1212 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1213 
1214 	/* Send interrupt when 64 filters are left */
1215 	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1216 
1217 	/* Initialize the drop queue to Rx queue 127 */
1218 	fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1219 
1220 	switch (pballoc) {
1221 	case IXGBE_FDIR_PBALLOC_64K:
1222 		/* 2k - 1 perfect filters */
1223 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1224 		break;
1225 	case IXGBE_FDIR_PBALLOC_128K:
1226 		/* 4k - 1 perfect filters */
1227 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1228 		break;
1229 	case IXGBE_FDIR_PBALLOC_256K:
1230 		/* 8k - 1 perfect filters */
1231 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1232 		break;
1233 	default:
1234 		/* bad value */
1235 		return IXGBE_ERR_CONFIG;
1236 	};
1237 
1238 	/* Turn perfect match filtering on */
1239 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1240 	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1241 
1242 	/* Move the flexible bytes to use the ethertype - shift 6 words */
1243 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1244 
1245 	/* Prime the keys for hashing */
1246 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1247 	                IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1248 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1249 	                IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1250 
1251 	/*
1252 	 * Poll init-done after we write the register.  Estimated times:
1253 	 *      10G: PBALLOC = 11b, timing is 60us
1254 	 *       1G: PBALLOC = 11b, timing is 600us
1255 	 *     100M: PBALLOC = 11b, timing is 6ms
1256 	 *
1257 	 *     Multiple these timings by 4 if under full Rx load
1258 	 *
1259 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1260 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1261 	 * this might not finish in our poll time, but we can live with that
1262 	 * for now.
1263 	 */
1264 
1265 	/* Set the maximum length per hash bucket to 0xA filters */
1266 	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1267 
1268 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1269 	IXGBE_WRITE_FLUSH(hw);
1270 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1271 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1272 		                   IXGBE_FDIRCTRL_INIT_DONE)
1273 			break;
1274 		msec_delay(1);
1275 	}
1276 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1277 		DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1278 
1279 	return IXGBE_SUCCESS;
1280 }
1281 
1282 
1283 /**
1284  *  ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1285  *  @stream: input bitstream to compute the hash on
1286  *  @key: 32-bit hash key
1287  **/
1288 u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
1289 {
1290 	/*
1291 	 * The algorithm is as follows:
1292 	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1293 	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1294 	 *    and A[n] x B[n] is bitwise AND between same length strings
1295 	 *
1296 	 *    K[n] is 16 bits, defined as:
1297 	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1298 	 *       for n modulo 32 < 15, K[n] =
1299 	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1300 	 *
1301 	 *    S[n] is 16 bits, defined as:
1302 	 *       for n >= 15, S[n] = S[n:n - 15]
1303 	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1304 	 *
1305 	 *    To simplify for programming, the algorithm is implemented
1306 	 *    in software this way:
1307 	 *
1308 	 *    Key[31:0], Stream[335:0]
1309 	 *
1310 	 *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
1311 	 *    int_key[350:0] = tmp_key[351:1]
1312 	 *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1313 	 *
1314 	 *    hash[15:0] = 0;
1315 	 *    for (i = 0; i < 351; i++) {
1316 	 *        if (int_key[i])
1317 	 *            hash ^= int_stream[(i + 15):i];
1318 	 *    }
1319 	 */
1320 
1321 	union {
1322 		u64    fill[6];
1323 		u32    key[11];
1324 		u8     key_stream[44];
1325 	} tmp_key;
1326 
1327 	u8   *stream = (u8 *)atr_input;
1328 	u8   int_key[44];      /* upper-most bit unused */
1329 	u8   hash_str[46];     /* upper-most 2 bits unused */
1330 	u16  hash_result = 0;
1331 	int  i, j, k, h;
1332 
1333 	DEBUGFUNC("ixgbe_atr_compute_hash_82599");
1334 
1335 	/*
1336 	 * Initialize the fill member to prevent warnings
1337 	 * on some compilers
1338 	 */
1339 	 tmp_key.fill[0] = 0;
1340 
1341 	/* First load the temporary key stream */
1342 	for (i = 0; i < 6; i++) {
1343 		u64 fillkey = ((u64)key << 32) | key;
1344 		tmp_key.fill[i] = fillkey;
1345 	}
1346 
1347 	/*
1348 	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
1349 	 * shift and compensate when building the key.
1350 	 */
1351 
1352 	int_key[0] = tmp_key.key_stream[0] >> 1;
1353 	for (i = 1, j = 0; i < 44; i++) {
1354 		unsigned int this_key = tmp_key.key_stream[j] << 7;
1355 		j++;
1356 		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1357 	}
1358 
1359 	/*
1360 	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
1361 	 * unused, so shift and compensate when building the string.
1362 	 */
1363 	hash_str[0] = (stream[40] & 0x7f) >> 1;
1364 	for (i = 1, j = 40; i < 46; i++) {
1365 		unsigned int this_str = stream[j] << 7;
1366 		j++;
1367 		if (j > 41)
1368 			j = 0;
1369 		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1370 	}
1371 
1372 	/*
1373 	 * Now compute the hash.  i is the index into hash_str, j is into our
1374 	 * key stream, k is counting the number of bits, and h interates within
1375 	 * each byte.
1376 	 */
1377 	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1378 		for (h = 0; h < 8 && k < 351; h++, k++) {
1379 			if (int_key[j] & (1 << h)) {
1380 				/*
1381 				 * Key bit is set, XOR in the current 16-bit
1382 				 * string.  Example of processing:
1383 				 *    h = 0,
1384 				 *      tmp = (hash_str[i - 2] & 0 << 16) |
1385 				 *            (hash_str[i - 1] & 0xff << 8) |
1386 				 *            (hash_str[i] & 0xff >> 0)
1387 				 *      So tmp = hash_str[15 + k:k], since the
1388 				 *      i + 2 clause rolls off the 16-bit value
1389 				 *    h = 7,
1390 				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
1391 				 *            (hash_str[i - 1] & 0xff << 1) |
1392 				 *            (hash_str[i] & 0x80 >> 7)
1393 				 */
1394 				int tmp = (hash_str[i] >> h);
1395 				tmp |= (hash_str[i - 1] << (8 - h));
1396 				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1397 				             << (16 - h);
1398 				hash_result ^= (u16)tmp;
1399 			}
1400 		}
1401 	}
1402 
1403 	return hash_result;
1404 }
1405 
1406 /**
1407  *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1408  *  @input: input stream to modify
1409  *  @vlan: the VLAN id to load
1410  **/
1411 s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1412 {
1413 	DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");
1414 
1415 	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1416 	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1417 
1418 	return IXGBE_SUCCESS;
1419 }
1420 
1421 /**
1422  *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1423  *  @input: input stream to modify
1424  *  @src_addr: the IP address to load
1425  **/
1426 s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1427 {
1428 	DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");
1429 
1430 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1431 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1432 	                                               (src_addr >> 16) & 0xff;
1433 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1434 	                                                (src_addr >> 8) & 0xff;
1435 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1436 
1437 	return IXGBE_SUCCESS;
1438 }
1439 
1440 /**
1441  *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1442  *  @input: input stream to modify
1443  *  @dst_addr: the IP address to load
1444  **/
1445 s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1446 {
1447 	DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");
1448 
1449 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1450 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1451 	                                               (dst_addr >> 16) & 0xff;
1452 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1453 	                                                (dst_addr >> 8) & 0xff;
1454 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1455 
1456 	return IXGBE_SUCCESS;
1457 }
1458 
1459 /**
1460  *  ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1461  *  @input: input stream to modify
1462  *  @src_addr_1: the first 4 bytes of the IP address to load
1463  *  @src_addr_2: the second 4 bytes of the IP address to load
1464  *  @src_addr_3: the third 4 bytes of the IP address to load
1465  *  @src_addr_4: the fourth 4 bytes of the IP address to load
1466  **/
1467 s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1468                                  u32 src_addr_1, u32 src_addr_2,
1469                                  u32 src_addr_3, u32 src_addr_4)
1470 {
1471 	DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");
1472 
1473 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1474 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1475 	                                               (src_addr_4 >> 8) & 0xff;
1476 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1477 	                                              (src_addr_4 >> 16) & 0xff;
1478 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1479 
1480 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1481 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1482 	                                               (src_addr_3 >> 8) & 0xff;
1483 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1484 	                                              (src_addr_3 >> 16) & 0xff;
1485 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1486 
1487 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1488 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1489 	                                               (src_addr_2 >> 8) & 0xff;
1490 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1491 	                                              (src_addr_2 >> 16) & 0xff;
1492 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1493 
1494 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1495 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1496 	                                               (src_addr_1 >> 8) & 0xff;
1497 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1498 	                                              (src_addr_1 >> 16) & 0xff;
1499 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1500 
1501 	return IXGBE_SUCCESS;
1502 }
1503 
1504 /**
1505  *  ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1506  *  @input: input stream to modify
1507  *  @dst_addr_1: the first 4 bytes of the IP address to load
1508  *  @dst_addr_2: the second 4 bytes of the IP address to load
1509  *  @dst_addr_3: the third 4 bytes of the IP address to load
1510  *  @dst_addr_4: the fourth 4 bytes of the IP address to load
1511  **/
1512 s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1513                                  u32 dst_addr_1, u32 dst_addr_2,
1514                                  u32 dst_addr_3, u32 dst_addr_4)
1515 {
1516 	DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");
1517 
1518 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1519 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1520 	                                               (dst_addr_4 >> 8) & 0xff;
1521 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1522 	                                              (dst_addr_4 >> 16) & 0xff;
1523 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1524 
1525 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1526 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1527 	                                               (dst_addr_3 >> 8) & 0xff;
1528 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1529 	                                              (dst_addr_3 >> 16) & 0xff;
1530 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1531 
1532 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1533 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1534 	                                               (dst_addr_2 >> 8) & 0xff;
1535 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1536 	                                              (dst_addr_2 >> 16) & 0xff;
1537 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1538 
1539 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1540 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1541 	                                               (dst_addr_1 >> 8) & 0xff;
1542 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1543 	                                              (dst_addr_1 >> 16) & 0xff;
1544 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1545 
1546 	return IXGBE_SUCCESS;
1547 }
1548 
1549 /**
1550  *  ixgbe_atr_set_src_port_82599 - Sets the source port
1551  *  @input: input stream to modify
1552  *  @src_port: the source port to load
1553  **/
1554 s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1555 {
1556 	DEBUGFUNC("ixgbe_atr_set_src_port_82599");
1557 
1558 	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1559 	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1560 
1561 	return IXGBE_SUCCESS;
1562 }
1563 
1564 /**
1565  *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
1566  *  @input: input stream to modify
1567  *  @dst_port: the destination port to load
1568  **/
1569 s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1570 {
1571 	DEBUGFUNC("ixgbe_atr_set_dst_port_82599");
1572 
1573 	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1574 	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1575 
1576 	return IXGBE_SUCCESS;
1577 }
1578 
1579 /**
1580  *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1581  *  @input: input stream to modify
1582  *  @flex_bytes: the flexible bytes to load
1583  **/
1584 s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1585 {
1586 	DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");
1587 
1588 	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1589 	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1590 
1591 	return IXGBE_SUCCESS;
1592 }
1593 
1594 /**
1595  *  ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1596  *  @input: input stream to modify
1597  *  @vm_pool: the Virtual Machine pool to load
1598  **/
1599 s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
1600 {
1601 	DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");
1602 
1603 	input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1604 
1605 	return IXGBE_SUCCESS;
1606 }
1607 
1608 /**
1609  *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1610  *  @input: input stream to modify
1611  *  @l4type: the layer 4 type value to load
1612  **/
1613 s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1614 {
1615 	DEBUGFUNC("ixgbe_atr_set_l4type_82599");
1616 
1617 	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1618 
1619 	return IXGBE_SUCCESS;
1620 }
1621 
1622 /**
1623  *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1624  *  @input: input stream to search
1625  *  @vlan: the VLAN id to load
1626  **/
1627 s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1628 {
1629 	DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");
1630 
1631 	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1632 	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1633 
1634 	return IXGBE_SUCCESS;
1635 }
1636 
1637 /**
1638  *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1639  *  @input: input stream to search
1640  *  @src_addr: the IP address to load
1641  **/
1642 s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
1643 {
1644 	DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");
1645 
1646 	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1647 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1648 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1649 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1650 
1651 	return IXGBE_SUCCESS;
1652 }
1653 
1654 /**
1655  *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
1656  *  @input: input stream to search
1657  *  @dst_addr: the IP address to load
1658  **/
1659 s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
1660 {
1661 	DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599");
1662 
1663 	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1664 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1665 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1666 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1667 
1668 	return IXGBE_SUCCESS;
1669 }
1670 
1671 /**
1672  *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1673  *  @input: input stream to search
1674  *  @src_addr_1: the first 4 bytes of the IP address to load
1675  *  @src_addr_2: the second 4 bytes of the IP address to load
1676  *  @src_addr_3: the third 4 bytes of the IP address to load
1677  *  @src_addr_4: the fourth 4 bytes of the IP address to load
1678  **/
1679 s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1680                                  u32 *src_addr_1, u32 *src_addr_2,
1681                                  u32 *src_addr_3, u32 *src_addr_4)
1682 {
1683 	DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599");
1684 
1685 	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1686 	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1687 	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1688 	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1689 
1690 	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1691 	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1692 	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1693 	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1694 
1695 	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1696 	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1697 	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1698 	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1699 
1700 	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1701 	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1702 	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1703 	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1704 
1705 	return IXGBE_SUCCESS;
1706 }
1707 
1708 /**
1709  *  ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1710  *  @input: input stream to search
1711  *  @dst_addr_1: the first 4 bytes of the IP address to load
1712  *  @dst_addr_2: the second 4 bytes of the IP address to load
1713  *  @dst_addr_3: the third 4 bytes of the IP address to load
1714  *  @dst_addr_4: the fourth 4 bytes of the IP address to load
1715  **/
1716 s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1717                                  u32 *dst_addr_1, u32 *dst_addr_2,
1718                                  u32 *dst_addr_3, u32 *dst_addr_4)
1719 {
1720 	DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599");
1721 
1722 	*dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
1723 	*dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
1724 	*dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
1725 	*dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1726 
1727 	*dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
1728 	*dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
1729 	*dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
1730 	*dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1731 
1732 	*dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
1733 	*dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1734 	*dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1735 	*dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1736 
1737 	*dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1738 	*dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1739 	*dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1740 	*dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1741 
1742 	return IXGBE_SUCCESS;
1743 }
1744 
1745 /**
1746  *  ixgbe_atr_get_src_port_82599 - Gets the source port
1747  *  @input: input stream to modify
1748  *  @src_port: the source port to load
1749  *
1750  *  Even though the input is given in big-endian, the FDIRPORT registers
1751  *  expect the ports to be programmed in little-endian.  Hence the need to swap
1752  *  endianness when retrieving the data.  This can be confusing since the
1753  *  internal hash engine expects it to be big-endian.
1754  **/
1755 s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
1756 {
1757 	DEBUGFUNC("ixgbe_atr_get_src_port_82599");
1758 
1759 	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
1760 	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
1761 
1762 	return IXGBE_SUCCESS;
1763 }
1764 
1765 /**
1766  *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
1767  *  @input: input stream to modify
1768  *  @dst_port: the destination port to load
1769  *
1770  *  Even though the input is given in big-endian, the FDIRPORT registers
1771  *  expect the ports to be programmed in little-endian.  Hence the need to swap
1772  *  endianness when retrieving the data.  This can be confusing since the
1773  *  internal hash engine expects it to be big-endian.
1774  **/
1775 s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
1776 {
1777 	DEBUGFUNC("ixgbe_atr_get_dst_port_82599");
1778 
1779 	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1780 	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1781 
1782 	return IXGBE_SUCCESS;
1783 }
1784 
1785 /**
1786  *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
1787  *  @input: input stream to modify
1788  *  @flex_bytes: the flexible bytes to load
1789  **/
1790 s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
1791 {
1792 	DEBUGFUNC("ixgbe_atr_get_flex_byte_82599");
1793 
1794 	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1795 	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1796 
1797 	return IXGBE_SUCCESS;
1798 }
1799 
1800 /**
1801  *  ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
1802  *  @input: input stream to modify
1803  *  @vm_pool: the Virtual Machine pool to load
1804  **/
1805 s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
1806 {
1807 	DEBUGFUNC("ixgbe_atr_get_vm_pool_82599");
1808 
1809 	*vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1810 
1811 	return IXGBE_SUCCESS;
1812 }
1813 
1814 /**
1815  *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1816  *  @input: input stream to modify
1817  *  @l4type: the layer 4 type value to load
1818  **/
1819 s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
1820 {
1821 	DEBUGFUNC("ixgbe_atr_get_l4type__82599");
1822 
1823 	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
1824 
1825 	return IXGBE_SUCCESS;
1826 }
1827 
1828 /**
1829  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1830  *  @hw: pointer to hardware structure
1831  *  @stream: input bitstream
1832  *  @queue: queue index to direct traffic to
1833  **/
1834 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1835                                           struct ixgbe_atr_input *input,
1836                                           u8 queue)
1837 {
1838 	u64  fdirhashcmd;
1839 	u64  fdircmd;
1840 	u32  fdirhash;
1841 	u16  bucket_hash, sig_hash;
1842 	u8   l4type;
1843 
1844 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1845 
1846 	bucket_hash = ixgbe_atr_compute_hash_82599(input,
1847 	                                           IXGBE_ATR_BUCKET_HASH_KEY);
1848 
1849 	/* bucket_hash is only 15 bits */
1850 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1851 
1852 	sig_hash = ixgbe_atr_compute_hash_82599(input,
1853 	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);
1854 
1855 	/* Get the l4type in order to program FDIRCMD properly */
1856 	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1857 	ixgbe_atr_get_l4type_82599(input, &l4type);
1858 
1859 	/*
1860 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1861 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1862 	 */
1863 	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1864 
1865 	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1866 	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
1867 
1868 	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1869 	case IXGBE_ATR_L4TYPE_TCP:
1870 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1871 		break;
1872 	case IXGBE_ATR_L4TYPE_UDP:
1873 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1874 		break;
1875 	case IXGBE_ATR_L4TYPE_SCTP:
1876 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1877 		break;
1878 	default:
1879 		DEBUGOUT(" Error on l4type input\n");
1880 		return IXGBE_ERR_CONFIG;
1881 	}
1882 
1883 	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
1884 		fdircmd |= IXGBE_FDIRCMD_IPV6;
1885 
1886 	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
1887 	fdirhashcmd = ((fdircmd << 32) | fdirhash);
1888 
1889 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
1890 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1891 
1892 	return IXGBE_SUCCESS;
1893 }
1894 
1895 /**
1896  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1897  *  @hw: pointer to hardware structure
1898  *  @input: input bitstream
1899  *  @input_masks: masks for the input bitstream
1900  *  @soft_id: software index for the filters
1901  *  @queue: queue index to direct traffic to
1902  *
1903  *  Note that the caller to this function must lock before calling, since the
1904  *  hardware writes must be protected from one another.
1905  **/
1906 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1907                                       struct ixgbe_atr_input *input,
1908                                       struct ixgbe_atr_input_masks *input_masks,
1909                                       u16 soft_id, u8 queue)
1910 {
1911 	u32 fdircmd = 0;
1912 	u32 fdirhash;
1913 	u32 src_ipv4 = 0, dst_ipv4 = 0;
1914 	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
1915 	u16 src_port, dst_port, vlan_id, flex_bytes;
1916 	u16 bucket_hash;
1917 	u8  l4type;
1918 	u8  fdirm = 0;
1919 
1920 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1921 
1922 	/* Get our input values */
1923 	ixgbe_atr_get_l4type_82599(input, &l4type);
1924 
1925 	/*
1926 	 * Check l4type formatting, and bail out before we touch the hardware
1927 	 * if there's a configuration issue
1928 	 */
1929 	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1930 	case IXGBE_ATR_L4TYPE_TCP:
1931 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1932 		break;
1933 	case IXGBE_ATR_L4TYPE_UDP:
1934 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1935 		break;
1936 	case IXGBE_ATR_L4TYPE_SCTP:
1937 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1938 		break;
1939 	default:
1940 		DEBUGOUT(" Error on l4type input\n");
1941 		return IXGBE_ERR_CONFIG;
1942 	}
1943 
1944 	bucket_hash = ixgbe_atr_compute_hash_82599(input,
1945 	                                           IXGBE_ATR_BUCKET_HASH_KEY);
1946 
1947 	/* bucket_hash is only 15 bits */
1948 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1949 
1950 	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
1951 	ixgbe_atr_get_src_port_82599(input, &src_port);
1952 	ixgbe_atr_get_dst_port_82599(input, &dst_port);
1953 	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
1954 
1955 	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1956 
1957 	/* Now figure out if we're IPv4 or IPv6 */
1958 	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
1959 		/* IPv6 */
1960 		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
1961 	                                     &src_ipv6_3, &src_ipv6_4);
1962 
1963 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
1964 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
1965 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
1966 		/* The last 4 bytes is the same register as IPv4 */
1967 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
1968 
1969 		fdircmd |= IXGBE_FDIRCMD_IPV6;
1970 		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
1971 	} else {
1972 		/* IPv4 */
1973 		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
1974 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
1975 	}
1976 
1977 	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
1978 	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
1979 
1980 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
1981 	                            (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
1982 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
1983 	              (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1984 
1985 	/*
1986 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1987 	 * are zero, then assume a full mask for that field.  Also assume that
1988 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1989 	 * cannot be masked out in this implementation.
1990 	 *
1991 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1992 	 * point in time.
1993 	 */
1994 	if (src_ipv4 == 0)
1995 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
1996 	else
1997 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
1998 
1999 	if (dst_ipv4 == 0)
2000 		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
2001 	else
2002 		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
2003 
2004 	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
2005 	case IXGBE_ATR_L4TYPE_TCP:
2006 		if (src_port == 0)
2007 			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
2008 		else
2009 			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2010 			                input_masks->src_port_mask);
2011 
2012 		if (dst_port == 0)
2013 			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2014 			               (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
2015 			                (0xffff << 16)));
2016 		else
2017 			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2018 			               (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
2019 			                (input_masks->dst_port_mask << 16)));
2020 		break;
2021 	case IXGBE_ATR_L4TYPE_UDP:
2022 		if (src_port == 0)
2023 			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
2024 		else
2025 			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2026 			                input_masks->src_port_mask);
2027 
2028 		if (dst_port == 0)
2029 			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2030 			               (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
2031 			                (0xffff << 16)));
2032 		else
2033 			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2034 			               (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
2035 			                (input_masks->src_port_mask << 16)));
2036 		break;
2037 	default:
2038 		/* this already would have failed above */
2039 		break;
2040 	}
2041 
2042 	/* Program the last mask register, FDIRM */
2043 	if (input_masks->vlan_id_mask || !vlan_id)
2044 		/* Mask both VLAN and VLANP - bits 0 and 1 */
2045 		fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP);
2046 
2047 	if (input_masks->data_mask || !flex_bytes)
2048 		/* Flex bytes need masking, so mask the whole thing - bit 4 */
2049 		fdirm |= IXGBE_FDIRM_FLEX;
2050 
2051 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
2052 	fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6);
2053 
2054 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
2055 
2056 	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
2057 	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
2058 	fdircmd |= IXGBE_FDIRCMD_LAST;
2059 	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
2060 	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
2061 
2062 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2063 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
2064 
2065 	return IXGBE_SUCCESS;
2066 }
2067 
2068 /**
2069  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2070  *  @hw: pointer to hardware structure
2071  *  @reg: analog register to read
2072  *  @val: read value
2073  *
2074  *  Performs read operation to Omer analog register specified.
2075  **/
2076 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2077 {
2078 	u32  core_ctl;
2079 
2080 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2081 
2082 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2083 	                (reg << 8));
2084 	IXGBE_WRITE_FLUSH(hw);
2085 	usec_delay(10);
2086 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2087 	*val = (u8)core_ctl;
2088 
2089 	return IXGBE_SUCCESS;
2090 }
2091 
2092 /**
2093  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2094  *  @hw: pointer to hardware structure
2095  *  @reg: atlas register to write
2096  *  @val: value to write
2097  *
2098  *  Performs write operation to Omer analog register specified.
2099  **/
2100 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2101 {
2102 	u32  core_ctl;
2103 
2104 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2105 
2106 	core_ctl = (reg << 8) | val;
2107 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2108 	IXGBE_WRITE_FLUSH(hw);
2109 	usec_delay(10);
2110 
2111 	return IXGBE_SUCCESS;
2112 }
2113 
2114 /**
2115  *  ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
2116  *  @hw: pointer to hardware structure
2117  *
2118  *  Starts the hardware using the generic start_hw function.
2119  *  Then performs revision-specific operations:
2120  *  Clears the rate limiter registers.
2121  **/
2122 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
2123 {
2124 	u32 i;
2125 	u32 regval;
2126 	s32 ret_val = IXGBE_SUCCESS;
2127 
2128 	DEBUGFUNC("ixgbe_start_hw_rev_1__82599");
2129 
2130 	ret_val = ixgbe_start_hw_generic(hw);
2131 
2132 	/* Clear the rate limiters */
2133 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
2134 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
2135 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
2136 	}
2137 	IXGBE_WRITE_FLUSH(hw);
2138 
2139 	/* Disable relaxed ordering */
2140 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
2141 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2142 		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2143 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
2144 	}
2145 
2146 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
2147 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
2148 		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
2149 		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
2150 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
2151 	}
2152 
2153 	/* We need to run link autotry after the driver loads */
2154 	hw->mac.autotry_restart = TRUE;
2155 
2156 	if (ret_val == IXGBE_SUCCESS)
2157 		ret_val = ixgbe_verify_fw_version_82599(hw);
2158 	return ret_val;
2159 }
2160 
2161 /**
2162  *  ixgbe_identify_phy_82599 - Get physical layer module
2163  *  @hw: pointer to hardware structure
2164  *
2165  *  Determines the physical layer module found on the current adapter.
2166  *  If PHY already detected, maintains current PHY type in hw struct,
2167  *  otherwise executes the PHY detection routine.
2168  **/
2169 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2170 {
2171 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2172 
2173 	DEBUGFUNC("ixgbe_identify_phy_82599");
2174 
2175 	/* Detect PHY if not unknown - returns success if already detected. */
2176 	status = ixgbe_identify_phy_generic(hw);
2177 	if (status != IXGBE_SUCCESS)
2178 		status = ixgbe_identify_sfp_module_generic(hw);
2179 	/* Set PHY type none if no PHY detected */
2180 	if (hw->phy.type == ixgbe_phy_unknown) {
2181 		hw->phy.type = ixgbe_phy_none;
2182 		status = IXGBE_SUCCESS;
2183 	}
2184 
2185 	/* Return error if SFP module has been detected but is not supported */
2186 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2187 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2188 
2189 	return status;
2190 }
2191 
2192 /**
2193  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2194  *  @hw: pointer to hardware structure
2195  *
2196  *  Determines physical layer capabilities of the current configuration.
2197  **/
2198 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2199 {
2200 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2201 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2202 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2203 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2204 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2205 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2206 	u16 ext_ability = 0;
2207 	u8 comp_codes_10g = 0;
2208 
2209 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2210 
2211 	hw->phy.ops.identify(hw);
2212 
2213 	if (hw->phy.type == ixgbe_phy_tn ||
2214 	    hw->phy.type == ixgbe_phy_aq ||
2215 	    hw->phy.type == ixgbe_phy_cu_unknown) {
2216 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2217 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2218 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2219 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2220 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2221 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2222 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2223 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2224 		goto out;
2225 	}
2226 
2227 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2228 	case IXGBE_AUTOC_LMS_1G_AN:
2229 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2230 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2231 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2232 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2233 			goto out;
2234 		} else
2235 			/* SFI mode so read SFP module */
2236 			goto sfp_check;
2237 		break;
2238 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2239 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2240 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2241 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2242 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2243 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2244 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2245 		goto out;
2246 		break;
2247 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2248 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2249 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2250 			goto out;
2251 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2252 			goto sfp_check;
2253 		break;
2254 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2255 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2256 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2257 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2258 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2259 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2260 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2261 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2262 		goto out;
2263 		break;
2264 	default:
2265 		goto out;
2266 		break;
2267 	}
2268 
2269 sfp_check:
2270 	/* SFP check must be done last since DA modules are sometimes used to
2271 	 * test KR mode -  we need to id KR mode correctly before SFP module.
2272 	 * Call identify_sfp because the pluggable module may have changed */
2273 	hw->phy.ops.identify_sfp(hw);
2274 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2275 		goto out;
2276 
2277 	switch (hw->phy.type) {
2278 	case ixgbe_phy_sfp_passive_tyco:
2279 	case ixgbe_phy_sfp_passive_unknown:
2280 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2281 		break;
2282 	case ixgbe_phy_sfp_ftl_active:
2283 	case ixgbe_phy_sfp_active_unknown:
2284 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2285 		break;
2286 	case ixgbe_phy_sfp_avago:
2287 	case ixgbe_phy_sfp_ftl:
2288 	case ixgbe_phy_sfp_intel:
2289 	case ixgbe_phy_sfp_unknown:
2290 		hw->phy.ops.read_i2c_eeprom(hw,
2291 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2292 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2293 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2294 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2295 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2296 		break;
2297 	default:
2298 		break;
2299 	}
2300 
2301 out:
2302 	return physical_layer;
2303 }
2304 
2305 /**
2306  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2307  *  @hw: pointer to hardware structure
2308  *  @regval: register value to write to RXCTRL
2309  *
2310  *  Enables the Rx DMA unit for 82599
2311  **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
#define IXGBE_MAX_SECRX_POLL 30
	int i;
	/* NOTE(review): registers are 32-bit; 'int' works here but u32 would
	 * match the driver's usual register-value type — confirm. */
	int secrxreg;

	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	/* Poll (up to 30 * 10us) for the security path to report ready. */
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(10);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
		         "path fully disabled.  Continuing with init.\n");

	/* Enable the Rx unit, then re-enable the security Rx path. */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
2351 
2352 /**
2353  *  ixgbe_get_device_caps_82599 - Get additional device capabilities
2354  *  @hw: pointer to hardware structure
2355  *  @device_caps: the EEPROM word with the extra device capabilities
2356  *
2357  *  This function will read the EEPROM location for the device capabilities,
2358  *  and return the word through device_caps.
2359  **/
s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_82599");

	/* NOTE(review): the EEPROM read's return value is ignored, so a
	 * failed read leaves *device_caps unchanged — confirm callers
	 * initialize it. */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
2368 
2369 /**
2370  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
2371  *  @hw: pointer to hardware structure
2372  *
2373  *  Verifies that installed the firmware version is 0.6 or higher
2374  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2375  *
2376  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2377  *  if the FW version is not supported.
2378  **/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
	/* Default to failure; only proven-good firmware flips this. */
	s32 status = IXGBE_ERR_EEPROM_VERSION;
	u16 fw_offset, fw_ptp_cfg_offset;
	u16 fw_version = 0;

	DEBUGFUNC("ixgbe_verify_fw_version_82599");

	/* firmware check is only necessary for SFI devices */
	if (hw->phy.media_type != ixgbe_media_type_fiber) {
		status = IXGBE_SUCCESS;
		goto fw_version_out;
	}

	/* get the offset to the Firmware Module block */
	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);

	/* 0 and 0xFFFF both indicate an unprogrammed/erased pointer */
	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
		goto fw_version_out;

	/* get the offset to the Pass Through Patch Configuration block */
	hw->eeprom.ops.read(hw, (fw_offset +
	                         IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
	                         &fw_ptp_cfg_offset);

	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
		goto fw_version_out;

	/* get the firmware version */
	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
	                         IXGBE_FW_PATCH_VERSION_4),
	                         &fw_version);

	/* version must be greater than 0.5 per the function contract */
	if (fw_version > 0x5)
		status = IXGBE_SUCCESS;

fw_version_out:
	return status;
}
2418 /**
2419  *  ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering
2420  *  @hw: pointer to hardware structure
2421  *
2422  **/
2423 void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw)
2424 {
2425 	u32 regval;
2426 	u32 i;
2427 
2428 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82599");
2429 
2430 	/* Enable relaxed ordering */
2431 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
2432 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2433 		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2434 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
2435 	}
2436 
2437 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
2438 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
2439 		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
2440 		           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
2441 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
2442 	}
2443 
2444 }
2445