1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at:
10  *      http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When using or redistributing this file, you may do so under the
15  * License only. No other modification of this header is permitted.
16  *
17  * If applicable, add the following below this CDDL HEADER, with the
18  * fields enclosed by brackets "[]" replaced with your own identifying
19  * information: Portions Copyright [yyyy] [name of copyright owner]
20  *
21  * CDDL HEADER END
22  */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 /* IntelVersion: 1.197 scm_100309_002210 */
30 
31 #include "ixgbe_type.h"
32 #include "ixgbe_api.h"
33 #include "ixgbe_common.h"
34 #include "ixgbe_phy.h"
35 
36 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
37 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
38     ixgbe_link_speed *speed, bool *autoneg);
39 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
40 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
41     ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
42 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
43     ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
44 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
45     bool autoneg_wait_to_complete);
46 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
47     ixgbe_link_speed speed, bool autoneg,
48     bool autoneg_wait_to_complete);
49 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
50     ixgbe_link_speed speed, bool autoneg,
51     bool autoneg_wait_to_complete);
52 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
53 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
54 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
55 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
56 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
57 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
58 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
59 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
60 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
61 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
62 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
63 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
64 
65 void
66 ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
67 {
68 	struct ixgbe_mac_info *mac = &hw->mac;
69 
70 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
71 
72 	if (hw->phy.multispeed_fiber) {
73 		/* Set up dual speed SFP+ support */
74 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
75 	} else {
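		/*
		 * Backplane (KX/KX4/KR) links with SmartSpeed enabled use the
		 * SmartSpeed routine, which can step down the advertised
		 * speeds when link cannot be made at the highest rate.
		 */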
76 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
77 		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
78 		    hw->phy.smart_speed == ixgbe_smart_speed_on))
79 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
80 		else
81 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
82 	}
83 }
84 
85 /*
86  * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
87  * @hw: pointer to hardware structure
88  *
89  * Initialize any function pointers that were not able to be
90  * set during init_shared_code because the PHY/SFP type was
91  * not known.  Perform the SFP init if necessary.
92  *
93  */
94 s32
95 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
96 {
97 	struct ixgbe_mac_info *mac = &hw->mac;
98 	struct ixgbe_phy_info *phy = &hw->phy;
99 	s32 ret_val = IXGBE_SUCCESS;
100 
101 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
102 
103 	/* Identify the PHY or SFP module */
104 	ret_val = phy->ops.identify(hw);
105 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
106 		goto init_phy_ops_out;
107 
108 	/* Setup function pointers based on detected SFP module and speeds */
109 	ixgbe_init_mac_link_ops_82599(hw);
110 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
111 		hw->phy.ops.reset = NULL;
112 
113 	/* If copper media, overwrite with copper function pointers */
114 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
115 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
116 		mac->ops.get_link_capabilities =
117 		    &ixgbe_get_copper_link_capabilities_generic;
118 	}
119 
120 	/* Set necessary function pointers based on phy type */
121 	switch (hw->phy.type) {
122 	case ixgbe_phy_tn:
123 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
124 		phy->ops.get_firmware_version =
125 		    &ixgbe_get_phy_firmware_version_tnx;
126 		break;
127 	case ixgbe_phy_aq:
128 		phy->ops.get_firmware_version =
129 		    &ixgbe_get_phy_firmware_version_generic;
130 		break;
131 	default:
132 		break;
133 	}
134 
135 init_phy_ops_out:
136 	return (ret_val);
137 }
138 
139 s32
140 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
141 {
142 	s32 ret_val = IXGBE_SUCCESS;
143 	u16 list_offset, data_offset, data_value;
144 
145 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
146 
147 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
148 		ixgbe_init_mac_link_ops_82599(hw);
149 
150 		hw->phy.ops.reset = NULL;
151 
152 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
153 		    &data_offset);
154 
155 		if (ret_val != IXGBE_SUCCESS)
156 			goto setup_sfp_out;
157 
158 		/* PHY config will finish before releasing the semaphore */
159 		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
160 		if (ret_val != IXGBE_SUCCESS) {
161 			ret_val = IXGBE_ERR_SWFW_SYNC;
162 			goto setup_sfp_out;
163 		}
164 
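		/*
		 * The SFP init sequence in the EEPROM is a list of words to
		 * write to CORECTL, terminated by 0xffff.
		 */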
165 		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
166 		while (data_value != 0xffff) {
167 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
168 			IXGBE_WRITE_FLUSH(hw);
169 			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
170 		}
171 		/* Now restart DSP by setting Restart_AN */
172 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
173 		    (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
174 
175 		/* Release the semaphore */
176 		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
177 		/* Delay obtaining semaphore again to allow FW access */
178 		msec_delay(hw->eeprom.semaphore_delay);
179 	}
180 
181 setup_sfp_out:
182 	return (ret_val);
183 }
184 
185 /*
186  * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
187  * @hw: pointer to hardware structure
188  *
189  * Initialize the function pointers and assign the MAC type for 82599.
190  * Does not touch the hardware.
191  */
192 
193 s32
194 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
195 {
196 	struct ixgbe_mac_info *mac = &hw->mac;
197 	struct ixgbe_phy_info *phy = &hw->phy;
198 	s32 ret_val;
199 
200 	DEBUGFUNC("ixgbe_init_ops_82599");
201 
202 	ret_val = ixgbe_init_phy_ops_generic(hw);
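	/* Start from the generic ops; 82599-specific entries are set below */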
203 	ret_val = ixgbe_init_ops_generic(hw);
204 
205 	/* PHY */
206 	phy->ops.identify = &ixgbe_identify_phy_82599;
207 	phy->ops.init = &ixgbe_init_phy_ops_82599;
208 
209 	/* MAC */
210 	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
211 	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
212 	mac->ops.get_supported_physical_layer =
213 	    &ixgbe_get_supported_physical_layer_82599;
214 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
215 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
216 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
217 	mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
218 	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
219 	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
220 	mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
221 	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
222 
223 	/* RAR, Multicast, VLAN */
224 	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
225 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
226 	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
227 	mac->rar_highwater = 1;
228 	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
229 	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
230 	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
231 	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
232 
233 	/* Link */
234 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
235 	mac->ops.check_link = &ixgbe_check_mac_link_generic;
236 	ixgbe_init_mac_link_ops_82599(hw);
237 
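	/* 82599 table sizes and queue limits */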
238 	mac->mcft_size = 128;
239 	mac->vft_size = 128;
240 	mac->num_rar_entries = 128;
241 	mac->max_tx_queues = 128;
242 	mac->max_rx_queues = 128;
243 	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
244 
245 	return (ret_val);
246 }
247 
248 /*
249  * ixgbe_get_link_capabilities_82599 - Determines link capabilities
250  * @hw: pointer to hardware structure
251  * @speed: pointer to link speed
252  * @negotiation: true when autoneg or autotry is enabled
253  *
254  * Determines the link capabilities by reading the AUTOC register.
255  */
256 s32
257 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
258     ixgbe_link_speed *speed, bool *negotiation)
259 {
260 	s32 status = IXGBE_SUCCESS;
261 	u32 autoc = 0;
262 
263 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
264 
265 	/*
266 	 * Determine link capabilities based on the stored value of AUTOC,
267 	 * which represents EEPROM defaults.  If AUTOC value has not
268 	 * been stored, use the current register values.
269 	 */
270 	if (hw->mac.orig_link_settings_stored)
271 		autoc = hw->mac.orig_autoc;
272 	else
273 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
274 
275 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
276 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
277 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
278 		*negotiation = false;
279 		break;
280 
281 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
282 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
283 		*negotiation = false;
284 		break;
285 
286 	case IXGBE_AUTOC_LMS_1G_AN:
287 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
288 		*negotiation = true;
289 		break;
290 
291 	case IXGBE_AUTOC_LMS_10G_SERIAL:
292 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
293 		*negotiation = false;
294 		break;
295 
296 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
297 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
298 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
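		/* KR and KX4 both carry 10GbE; KX carries 1GbE */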
299 		if (autoc & IXGBE_AUTOC_KR_SUPP)
300 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
301 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
302 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
303 		if (autoc & IXGBE_AUTOC_KX_SUPP)
304 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
305 		*negotiation = true;
306 		break;
307 
308 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
309 		*speed = IXGBE_LINK_SPEED_100_FULL;
310 		if (autoc & IXGBE_AUTOC_KR_SUPP)
311 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
312 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
313 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
314 		if (autoc & IXGBE_AUTOC_KX_SUPP)
315 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
316 		*negotiation = true;
317 		break;
318 
319 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
320 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
321 		*negotiation = false;
322 		break;
323 
324 	default:
325 		status = IXGBE_ERR_LINK_SETUP;
326 		goto out;
327 	}
328 
329 	if (hw->phy.multispeed_fiber) {
330 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
331 		    IXGBE_LINK_SPEED_1GB_FULL;
332 		*negotiation = true;
333 	}
334 
335 out:
336 	return (status);
337 }
338 
339 /*
340  * ixgbe_get_media_type_82599 - Get media type
341  * @hw: pointer to hardware structure
342  *
343  * Returns the media type (fiber, copper, backplane)
344  */
345 enum ixgbe_media_type
346 ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
347 {
348 	enum ixgbe_media_type media_type;
349 
350 	DEBUGFUNC("ixgbe_get_media_type_82599");
351 
352 	/* Detect if there is a copper PHY attached. */
353 	if (hw->phy.type == ixgbe_phy_cu_unknown ||
354 	    hw->phy.type == ixgbe_phy_tn ||
355 	    hw->phy.type == ixgbe_phy_aq) {
356 		media_type = ixgbe_media_type_copper;
357 		goto out;
358 	}
359 
360 	switch (hw->device_id) {
361 	case IXGBE_DEV_ID_82599_KX4:
362 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
363 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
364 	case IXGBE_DEV_ID_82599_XAUI_LOM:
365 		/* Default device ID is mezzanine card KX/KX4 */
366 		media_type = ixgbe_media_type_backplane;
367 		break;
368 	case IXGBE_DEV_ID_82599_SFP:
369 	case IXGBE_DEV_ID_82599_SFP_EM:
370 		media_type = ixgbe_media_type_fiber;
371 		break;
372 	case IXGBE_DEV_ID_82599_CX4:
373 		media_type = ixgbe_media_type_cx4;
374 		break;
375 	default:
376 		media_type = ixgbe_media_type_unknown;
377 		break;
378 	}
379 out:
380 	return (media_type);
381 }
382 
383 /*
384  * ixgbe_start_mac_link_82599 - Setup MAC link settings
385  * @hw: pointer to hardware structure
386  *
387  * Configures link settings based on values in the ixgbe_hw struct.
388  * Restarts the link.  Performs autonegotiation if needed.
389  */
390 s32
391 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete)
392 {
393 	u32 autoc_reg;
394 	u32 links_reg;
395 	u32 i;
396 	s32 status = IXGBE_SUCCESS;
397 
398 	DEBUGFUNC("ixgbe_start_mac_link_82599");
399 
400 	/* Restart link */
401 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
402 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
403 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
404 
405 	/* Only poll for autoneg to complete if specified to do so */
406 	if (autoneg_wait_to_complete) {
407 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
408 		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
409 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
410 		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
411 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
412 		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
413 			links_reg = 0; /* Just in case Autoneg time = 0 */
414 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
415 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
416 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
417 					break;
418 				msec_delay(100);
419 			}
420 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
421 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
422 				DEBUGOUT("Autoneg did not complete.\n");
423 			}
424 		}
425 	}
426 
	/* Add delay to filter out noise during initial link setup */
428 	msec_delay(50);
429 
430 	return (status);
431 }
432 
433 /*
434  * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
435  * @hw: pointer to hardware structure
436  * @speed: new link speed
437  * @autoneg: true if autonegotiation enabled
438  * @autoneg_wait_to_complete: true when waiting for completion is needed
439  *
440  * Set the link speed in the AUTOC register and restarts link.
441  */
442 s32
443 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
444     ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
445 {
446 	s32 status = IXGBE_SUCCESS;
447 	ixgbe_link_speed link_speed;
448 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
449 	u32 speedcnt = 0;
450 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
451 	u32 i = 0;
452 	bool link_up = false;
453 	bool negotiation;
454 
455 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
456 
457 	/* Mask off requested but non-supported speeds */
458 	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
459 	if (status != IXGBE_SUCCESS)
460 		return (status);
461 
462 	speed &= link_speed;
463 
464 	/*
465 	 * When the driver changes the link speeds that it can support,
466 	 * it sets autotry_restart to true to indicate that we need to
467 	 * initiate a new autotry session with the link partner.  To do
468 	 * so, we set the speed then disable and re-enable the tx laser, to
469 	 * alert the link partner that it also needs to restart autotry on its
470 	 * end.  This is consistent with true clause 37 autoneg, which also
471 	 * involves a loss of signal.
472 	 */
473 
474 	/*
475 	 * Try each speed one by one, highest priority first.  We do this in
476 	 * software because 10gb fiber doesn't support speed autonegotiation.
477 	 */
478 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
479 		speedcnt++;
480 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
481 
482 		/* If we already have link at this speed, just jump out */
483 		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
484 		if (status != IXGBE_SUCCESS)
485 			return (status);
486 
487 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
488 			goto out;
489 
490 		/* Set the module link speed */
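		/* SDP5 acts as the module rate select: set for 10G, cleared for 1G */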
491 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
492 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
493 
494 		/* Allow module to change analog characteristics (1G->10G) */
495 		msec_delay(40);
496 
497 		status = ixgbe_setup_mac_link_82599(
498 		    hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
499 		    autoneg_wait_to_complete);
500 		if (status != IXGBE_SUCCESS)
501 			return (status);
502 
503 		/* Flap the tx laser if it has not already been done */
504 		if (hw->mac.autotry_restart) {
505 			/* Disable tx laser; allow 100us to go dark per spec */
506 			esdp_reg |= IXGBE_ESDP_SDP3;
507 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
508 			usec_delay(100);
509 
510 			/* Enable tx laser; allow 2ms to light up per spec */
511 			esdp_reg &= ~IXGBE_ESDP_SDP3;
512 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
513 			msec_delay(2);
514 
515 			hw->mac.autotry_restart = false;
516 		}
517 
518 		/*
519 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
520 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
521 		 * attempted.  82599 uses the same timing for 10g SFI.
522 		 */
523 		for (i = 0; i < 5; i++) {
524 			/* Wait for the link partner to also set speed */
525 			msec_delay(100);
526 
527 			/* If we have link, just jump out */
528 			status = ixgbe_check_link(hw, &link_speed,
529 			    &link_up, false);
530 			if (status != IXGBE_SUCCESS)
531 				return (status);
532 
533 			if (link_up)
534 				goto out;
535 		}
536 	}
537 
538 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
539 		speedcnt++;
540 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
541 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
542 
543 		/* If we already have link at this speed, just jump out */
544 		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
545 		if (status != IXGBE_SUCCESS)
546 			return (status);
547 
548 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
549 			goto out;
550 
551 		/* Set the module link speed */
552 		esdp_reg &= ~IXGBE_ESDP_SDP5;
553 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
554 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
555 
556 		/* Allow module to change analog characteristics (10G->1G) */
557 		msec_delay(40);
558 
559 		status = ixgbe_setup_mac_link_82599(
560 		    hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
561 		    autoneg_wait_to_complete);
562 		if (status != IXGBE_SUCCESS)
563 			return (status);
564 
565 		/* Flap the tx laser if it has not already been done */
566 		if (hw->mac.autotry_restart) {
567 			/* Disable tx laser; allow 100us to go dark per spec */
568 			esdp_reg |= IXGBE_ESDP_SDP3;
569 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
570 			usec_delay(100);
571 
572 			/* Enable tx laser; allow 2ms to light up per spec */
573 			esdp_reg &= ~IXGBE_ESDP_SDP3;
574 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
575 			msec_delay(2);
576 
577 			hw->mac.autotry_restart = false;
578 		}
579 
580 		/* Wait for the link partner to also set speed */
581 		msec_delay(100);
582 
583 		/* If we have link, just jump out */
584 		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
585 		if (status != IXGBE_SUCCESS)
586 			return (status);
587 
588 		if (link_up)
589 			goto out;
590 	}
591 
592 	/*
	 * We didn't get link.  Configure back to the highest speed we tried
594 	 * (if there was more than one).  We call ourselves back with just the
595 	 * single highest speed that the user requested.
596 	 */
597 	if (speedcnt > 1)
598 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
599 		    highest_link_speed, autoneg, autoneg_wait_to_complete);
600 
601 out:
602 	/* Set autoneg_advertised value based on input link speed */
603 	hw->phy.autoneg_advertised = 0;
604 
605 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
606 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
607 
608 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
609 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
610 
611 	return (status);
612 }
613 
614 /*
615  * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
616  * @hw: pointer to hardware structure
617  * @speed: new link speed
618  * @autoneg: true if autonegotiation enabled
619  * @autoneg_wait_to_complete: true when waiting for completion is needed
620  *
621  * Implements the Intel SmartSpeed algorithm.
622  */
623 s32
624 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
625     ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
626 {
627 	s32 status = IXGBE_SUCCESS;
628 	ixgbe_link_speed link_speed;
629 	s32 i, j;
630 	bool link_up = false;
631 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
632 
633 	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
634 
635 	/* Set autoneg_advertised value based on input link speed */
636 	hw->phy.autoneg_advertised = 0;
637 
638 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
639 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
640 
641 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
642 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
643 
644 	if (speed & IXGBE_LINK_SPEED_100_FULL)
645 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
646 
647 	/*
648 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
649 	 * autoneg advertisement if link is unable to be established at the
650 	 * highest negotiated rate.  This can sometimes happen due to integrity
651 	 * issues with the physical media connection.
652 	 */
653 
654 	/* First, try to get link with full advertisement */
655 	hw->phy.smart_speed_active = false;
656 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
657 		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
658 		    autoneg_wait_to_complete);
659 		if (status != IXGBE_SUCCESS)
660 			goto out;
661 
662 		/*
663 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
664 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
665 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
666 		 * Table 9 in the AN MAS.
667 		 */
668 		for (i = 0; i < 5; i++) {
669 			msec_delay(100);
670 
671 			/* If we have link, just jump out */
672 			status = ixgbe_check_link(hw, &link_speed, &link_up,
673 			    false);
674 			if (status != IXGBE_SUCCESS)
675 				goto out;
676 
677 			if (link_up)
678 				goto out;
679 		}
680 	}
681 
682 	/*
683 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
684 	 * (or BX4/BX), then disable KR and try again.
685 	 */
686 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
687 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
688 		goto out;
689 
690 	/* Turn SmartSpeed on to disable KR support */
691 	hw->phy.smart_speed_active = true;
692 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
693 	    autoneg_wait_to_complete);
694 	if (status != IXGBE_SUCCESS)
695 		goto out;
696 
697 	/*
698 	 * Wait for the controller to acquire link.  600ms will allow for
699 	 * the AN link_fail_inhibit_timer as well for multiple cycles of
700 	 * parallel detect, both 10g and 1g. This allows for the maximum
701 	 * connect attempts as defined in the AN MAS table 73-7.
702 	 */
703 	for (i = 0; i < 6; i++) {
704 		msec_delay(100);
705 
706 		/* If we have link, just jump out */
707 		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
708 		if (status != IXGBE_SUCCESS)
709 			goto out;
710 
711 		if (link_up)
712 			goto out;
713 	}
714 
715 	/* We didn't get link.  Turn SmartSpeed back off. */
716 	hw->phy.smart_speed_active = false;
717 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
718 	    autoneg_wait_to_complete);
719 
720 out:
721 	return (status);
722 }
723 
724 /*
725  * ixgbe_setup_mac_link_82599 - Set MAC link speed
726  * @hw: pointer to hardware structure
727  * @speed: new link speed
728  * @autoneg: true if autonegotiation enabled
729  * @autoneg_wait_to_complete: true when waiting for completion is needed
730  *
731  * Set the link speed in the AUTOC register and restarts link.
732  */
733 s32
734 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
735     ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
736 {
737 	s32 status = IXGBE_SUCCESS;
738 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
739 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
740 	u32 start_autoc = autoc;
741 	u32 orig_autoc = 0;
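	/*
	 * Current link mode and the 1G / 10G serial PMA/PMD selections
	 * decoded from AUTOC and AUTOC2.
	 */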
742 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
743 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
744 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
745 	u32 links_reg;
746 	u32 i;
747 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
748 
749 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
750 
751 	/* Check to see if speed passed in is supported. */
752 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
753 	if (status != IXGBE_SUCCESS)
754 		goto out;
755 
756 	speed &= link_capabilities;
757 
758 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
759 		status = IXGBE_ERR_LINK_SETUP;
760 		goto out;
761 	}
762 
763 	/*
764 	 * Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support
765 	 */
766 	if (hw->mac.orig_link_settings_stored)
767 		orig_autoc = hw->mac.orig_autoc;
768 	else
769 		orig_autoc = autoc;
770 
771 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
772 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
773 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
774 		/* Set KX4/KX/KR support according to speed requested */
775 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == false))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
782 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
783 			autoc |= IXGBE_AUTOC_KX_SUPP;
784 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
785 	    (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
786 	    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
787 		/* Switch from 1G SFI to 10G SFI if requested */
788 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
789 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
790 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
791 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
792 		}
793 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
794 	    (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
795 		/* Switch from 10G SFI to 1G SFI if requested */
796 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
797 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
798 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
799 			if (autoneg)
800 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
801 			else
802 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
804 	}
805 
806 	if (autoc != start_autoc) {
807 		/* Restart link */
808 		autoc |= IXGBE_AUTOC_AN_RESTART;
809 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
810 
811 		/* Only poll for autoneg to complete if specified to do so */
812 		if (autoneg_wait_to_complete) {
813 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
814 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
815 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
816 				links_reg = 0; /* Just in case Autoneg time=0 */
817 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
818 					links_reg =
819 					    IXGBE_READ_REG(hw, IXGBE_LINKS);
820 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
821 						break;
822 					msec_delay(100);
823 				}
824 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
825 					status =
826 					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
827 					DEBUGOUT("Autoneg did not complete.\n");
828 				}
829 			}
830 		}
831 
		/* Add delay to filter out noise during initial link setup */
833 		msec_delay(50);
834 	}
835 
836 out:
837 	return (status);
838 }
839 
840 /*
841  * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
842  * @hw: pointer to hardware structure
843  * @speed: new link speed
844  * @autoneg: true if autonegotiation enabled
845  * @autoneg_wait_to_complete: true if waiting is needed to complete
846  *
847  * Restarts link on PHY and MAC based on settings passed in.
848  */
849 static s32
850 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
851     ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
852 {
853 	s32 status;
854 
855 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
856 
857 	/* Setup the PHY according to input speed */
858 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
859 	    autoneg_wait_to_complete);
860 	/* Set up MAC */
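	/*
	 * Report the PHY setup status; the MAC restart result is
	 * intentionally ignored here.
	 */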
861 	(void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
862 
863 	return (status);
864 }

/*
866  * ixgbe_reset_hw_82599 - Perform hardware reset
867  * @hw: pointer to hardware structure
868  *
869  * Resets the hardware by resetting the transmit and receive units, masks
870  * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
872  */
873 s32
874 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
875 {
876 	s32 status = IXGBE_SUCCESS;
877 	u32 ctrl, ctrl_ext;
878 	u32 i;
879 	u32 autoc;
880 	u32 autoc2;
881 
882 	DEBUGFUNC("ixgbe_reset_hw_82599");
883 
884 	/* Call adapter stop to disable tx/rx and clear interrupts */
885 	hw->mac.ops.stop_adapter(hw);
886 
887 	/* PHY ops must be identified and initialized prior to reset */
888 
889 	/* Identify PHY and related function pointers */
890 	status = hw->phy.ops.init(hw);
891 
892 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
893 		goto reset_hw_out;
894 
895 	/* Setup SFP module if there is one present. */
896 	if (hw->phy.sfp_setup_needed) {
897 		status = hw->mac.ops.setup_sfp(hw);
898 		hw->phy.sfp_setup_needed = false;
899 	}
900 
901 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
902 		goto reset_hw_out;
903 
904 	/* Reset PHY */
905 	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
906 		hw->phy.ops.reset(hw);
907 
908 	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
910 	 * access and verify no pending requests before reset
911 	 */
912 	status = ixgbe_disable_pcie_master(hw);
913 	if (status != IXGBE_SUCCESS) {
914 		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
915 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
916 	}
917 
918 	/*
919 	 * Issue global reset to the MAC.  This needs to be a SW reset.
920 	 * If link reset is used, it might reset the MAC when mng is using it
921 	 */
922 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
923 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
924 	IXGBE_WRITE_FLUSH(hw);
925 
926 	/* Poll for reset bit to self-clear indicating reset is complete */
927 	for (i = 0; i < 10; i++) {
928 		usec_delay(1);
929 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
930 		if (!(ctrl & IXGBE_CTRL_RST)) {
931 			break;
932 		}
933 	}
934 	if (ctrl & IXGBE_CTRL_RST) {
935 		status = IXGBE_ERR_RESET_FAILED;
936 		DEBUGOUT("Reset polling failed to complete.\n");
937 	}
938 
939 	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
940 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
941 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
942 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
943 
944 	msec_delay(50);
945 
946 	/*
947 	 * Store the original AUTOC/AUTOC2 values if they have not been
948 	 * stored off yet.  Otherwise restore the stored original
949 	 * values since the reset operation sets back to defaults.
950 	 */
951 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
952 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
953 	if (hw->mac.orig_link_settings_stored == false) {
954 		hw->mac.orig_autoc = autoc;
955 		hw->mac.orig_autoc2 = autoc2;
956 		hw->mac.orig_link_settings_stored = true;
957 	} else {
958 		if (autoc != hw->mac.orig_autoc) {
959 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
960 			    IXGBE_AUTOC_AN_RESTART));
961 		}
962 
963 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
964 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
965 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
966 			autoc2 |= (hw->mac.orig_autoc2 &
967 			    IXGBE_AUTOC2_UPPER_MASK);
968 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
969 		}
970 	}
971 
972 	/* Store the permanent mac address */
973 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
974 
975 	/*
976 	 * Store MAC address from RAR0, clear receive address registers, and
977 	 * clear the multicast table.  Also reset num_rar_entries to 128,
978 	 * since we modify this value when programming the SAN MAC address.
979 	 */
980 	hw->mac.num_rar_entries = 128;
981 	hw->mac.ops.init_rx_addrs(hw);
982 
983 	/* Store the permanent SAN mac address */
984 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
985 
986 	/* Add the SAN MAC address to the RAR only if it's a valid address */
987 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
988 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
989 		    hw->mac.san_addr, 0, IXGBE_RAH_AV);
990 
991 		/* Reserve the last RAR for the SAN MAC address */
992 		hw->mac.num_rar_entries--;
993 	}
994 
995 	/* Store the alternative WWNN/WWPN prefix */
996 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
997 	    &hw->mac.wwpn_prefix);
998 
999 reset_hw_out:
1000 	return (status);
1001 }
1002 
1003 /*
1004  * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1005  * @hw: pointer to hardware structure
1006  */
1007 s32
1008 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1009 {
1010 	int i;
1011 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
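	/* Clear INIT_DONE so re-init completion can be polled below */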
1012 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1013 
1014 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1015 
1016 	/*
1017 	 * Before starting reinitialization process,
1018 	 * FDIRCMD.CMD must be zero.
1019 	 */
1020 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1021 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1022 		    IXGBE_FDIRCMD_CMD_MASK))
1023 			break;
1024 		usec_delay(10);
1025 	}
1026 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1027 		DEBUGOUT("Flow Director previous command isn't complete, "
		    "aborting table re-initialization.\n");
1029 		return (IXGBE_ERR_FDIR_REINIT_FAILED);
1030 	}
1031 
1032 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1033 	IXGBE_WRITE_FLUSH(hw);
1034 	/*
1035 	 * 82599 adapters flow director init flow cannot be restarted,
1036 	 * Workaround 82599 silicon errata by performing the following steps
1037 	 * before re-writing the FDIRCTRL control register with the same value.
1038 	 * - write 1 to bit 8 of FDIRCMD register &
1039 	 * - write 0 to bit 8 of FDIRCMD register
1040 	 */
1041 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1042 	    (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1043 	    IXGBE_FDIRCMD_CLEARHT));
1044 	IXGBE_WRITE_FLUSH(hw);
1045 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1046 	    (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1047 	    ~IXGBE_FDIRCMD_CLEARHT));
1048 	IXGBE_WRITE_FLUSH(hw);
1049 	/*
1050 	 * Clear FDIR Hash register to clear any leftover hashes
1051 	 * waiting to be programmed.
1052 	 */
1053 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1054 	IXGBE_WRITE_FLUSH(hw);
1055 
1056 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1057 	IXGBE_WRITE_FLUSH(hw);
1058 
1059 	/* Poll init-done after we write FDIRCTRL register */
1060 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1061 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1062 		    IXGBE_FDIRCTRL_INIT_DONE)
1063 			break;
1064 		usec_delay(10);
1065 	}
1066 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1067 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1068 		return (IXGBE_ERR_FDIR_REINIT_FAILED);
1069 	}
1070 
1071 	/* Clear FDIR statistics registers (read to clear) */
1072 	(void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1073 	(void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1074 	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1075 	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1076 	(void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1077 
1078 	return (IXGBE_SUCCESS);
1079 }
1080 
1081 /*
1082  * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1083  * @hw: pointer to hardware structure
1084  * @pballoc: which mode to allocate filters with
1085  */
1086 s32
1087 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1088 {
1089 	u32 fdirctrl = 0;
1090 	u32 pbsize;
1091 	int i;
1092 
1093 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1094 
1095 	/*
1096 	 * Before enabling Flow Director, the Rx Packet Buffer size
1097 	 * must be reduced.  The new value is the current size minus
1098 	 * flow director memory usage size.
1099 	 */
1100 	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1101 	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1102 	    IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize);
1103 
1104 	/*
1105 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
1107 	 * would be bigger than programmed and filter space would run into
1108 	 * the PB 0 region.
1109 	 */
1110 	for (i = 1; i < 8; i++)
1111 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1112 
1113 	/* Send interrupt when 64 filters are left */
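	/*
	 * The full-threshold field appears to be encoded in units of
	 * 16 filters, so a value of 4 corresponds to 64 filters.
	 */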
1114 	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1115 
1116 	/* Set the maximum length per hash bucket to 0xA filters */
1117 	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1118 
1119 	switch (pballoc) {
1120 	case IXGBE_FDIR_PBALLOC_64K:
1121 		/* 8k - 1 signature filters */
1122 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1123 		break;
1124 	case IXGBE_FDIR_PBALLOC_128K:
1125 		/* 16k - 1 signature filters */
1126 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1127 		break;
1128 	case IXGBE_FDIR_PBALLOC_256K:
1129 		/* 32k - 1 signature filters */
1130 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1131 		break;
1132 	default:
1133 		/* bad value */
1134 		return (IXGBE_ERR_CONFIG);
	}
1136 
1137 	/* Move the flexible bytes to use the ethertype - shift 6 words */
1138 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1139 
1140 	/* Prime the keys for hashing */
1141 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1142 	    IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1143 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1144 	    IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1145 
1146 	/*
1147 	 * Poll init-done after we write the register.  Estimated times:
1148 	 *   10G: PBALLOC = 11b, timing is 60us
1149 	 *    1G: PBALLOC = 11b, timing is 600us
1150 	 *  100M: PBALLOC = 11b, timing is 6ms
1151 	 *
	 *   Multiply these timings by 4 if under full Rx load
1153 	 *
1154 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1155 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1156 	 * this might not finish in our poll time, but we can live with that
1157 	 * for now.
1158 	 */
1159 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1160 	IXGBE_WRITE_FLUSH(hw);
1161 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1162 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1163 		    IXGBE_FDIRCTRL_INIT_DONE)
1164 			break;
1165 
1166 		msec_delay(1);
1167 	}
1168 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1169 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1170 	}
1171 
1172 	return (IXGBE_SUCCESS);
1173 }
1174 
1175 /*
1176  * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1177  * @hw: pointer to hardware structure
1178  * @pballoc: which mode to allocate filters with
1179  */
1180 s32
1181 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1182 {
1183 	u32 fdirctrl = 0;
1184 	u32 pbsize;
1185 	int i;
1186 
1187 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1188 
1189 	/*
1190 	 * Before enabling Flow Director, the Rx Packet Buffer size
1191 	 * must be reduced.  The new value is the current size minus
1192 	 * flow director memory usage size.
1193 	 */
1194 
1195 	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1196 	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1197 	    IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize);
1198 
1199 	/*
1200 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
1202 	 * would be bigger than programmed and filter space would run into
1203 	 * the PB 0 region.
1204 	 */
1205 	for (i = 1; i < 8; i++)
1206 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1207 
1208 	/* Send interrupt when 64 filters are left */
1209 	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1210 
1211 	switch (pballoc) {
1212 	case IXGBE_FDIR_PBALLOC_64K:
1213 		/* 2k - 1 perfect filters */
1214 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1215 		break;
1216 	case IXGBE_FDIR_PBALLOC_128K:
1217 		/* 4k - 1 perfect filters */
1218 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1219 		break;
1220 	case IXGBE_FDIR_PBALLOC_256K:
1221 		/* 8k - 1 perfect filters */
1222 		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1223 		break;
1224 	default:
1225 		/* bad value */
1226 		return (IXGBE_ERR_CONFIG);
	}
1228 
1229 	/* Turn perfect match filtering on */
1230 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1231 	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1232 
1233 	/* Move the flexible bytes to use the ethertype - shift 6 words */
1234 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1235 
1236 	/* Prime the keys for hashing */
1237 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1238 	    IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1239 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1240 	    IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1241 
1242 	/*
1243 	 * Poll init-done after we write the register.  Estimated times:
1244 	 *   10G: PBALLOC = 11b, timing is 60us
1245 	 *    1G: PBALLOC = 11b, timing is 600us
1246 	 *  100M: PBALLOC = 11b, timing is 6ms
1247 	 *
	 *  Multiply these timings by 4 if under full Rx load
1249 	 *
1250 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1251 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1252 	 * this might not finish in our poll time, but we can live with that
1253 	 * for now.
1254 	 */
1255 
1256 	/* Set the maximum length per hash bucket to 0xA filters */
1257 	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1258 
1259 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1260 	IXGBE_WRITE_FLUSH(hw);
1261 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1262 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1263 		    IXGBE_FDIRCTRL_INIT_DONE)
1264 			break;
1265 
1266 		msec_delay(1);
1267 	}
1268 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1269 		DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1270 	}
1271 
1272 	return (IXGBE_SUCCESS);
1273 }
1274 
1275 /*
1276  * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1277  * @stream: input bitstream to compute the hash on
1278  * @key: 32-bit hash key
1279  */
1280 u16
1281 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
1282 {
1283 	/*
1284 	 * The algorithm is as follows:
1285 	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1286 	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1287 	 *    and A[n] x B[n] is bitwise AND between same length strings
1288 	 *
1289 	 *    K[n] is 16 bits, defined as:
1290 	 *	for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1291 	 *	for n modulo 32 < 15, K[n] =
1292 	 *		K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1293 	 *
1294 	 *    S[n] is 16 bits, defined as:
1295 	 *	for n >= 15, S[n] = S[n:n - 15]
1296 	 *	for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1297 	 *
1298 	 *    To simplify for programming, the algorithm is implemented
1299 	 *    in software this way:
1300 	 *
1301 	 *    Key[31:0], Stream[335:0]
1302 	 *
	 *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0]}, i.e. the key concatenated 11 times
1304 	 *    int_key[350:0] = tmp_key[351:1]
1305 	 *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1306 	 *
1307 	 *    hash[15:0] = 0;
1308 	 *    for (i = 0; i < 351; i++) {
1309 	 *	if (int_key[i])
1310 	 *		hash ^= int_stream[(i + 15):i];
1311 	 *    }
1312 	 */
1313 
1314 	union {
1315 		u64 fill[6];
1316 		u32 key[11];
1317 		u8 key_stream[44];
1318 	} tmp_key;
1319 
1320 	u8 *stream = (u8 *)atr_input;
1321 	u8 int_key[44];		/* upper-most bit unused */
1322 	u8 hash_str[46];	/* upper-most 2 bits unused */
1323 	u16 hash_result = 0;
1324 	int i, j, k, h;
1325 
1326 	DEBUGFUNC("ixgbe_atr_compute_hash_82599");
1327 
1328 	/*
1329 	 * Initialize the fill member to prevent warnings
1330 	 * on some compilers
1331 	 */
1332 	tmp_key.fill[0] = 0;
1333 
1334 	/* First load the temporary key stream */
1335 	for (i = 0; i < 6; i++) {
1336 		u64 fillkey = ((u64)key << 32) | key;
1337 		tmp_key.fill[i] = fillkey;
1338 	}
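	/*
	 * tmp_key now holds the 32-bit key repeated back to back; the
	 * first 44 bytes (the key concatenated 11 times) are used below.
	 */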
1339 
1340 	/*
1341 	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
1342 	 * shift and compensate when building the key.
1343 	 */
1344 	int_key[0] = tmp_key.key_stream[0] >> 1;
1345 	for (i = 1, j = 0; i < 44; i++) {
1346 		unsigned int this_key = tmp_key.key_stream[j] << 7;
1347 		j++;
1348 		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1349 	}
1350 
1351 	/*
1352 	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
1353 	 * unused, so shift and compensate when building the string.
1354 	 */
1355 	hash_str[0] = (stream[40] & 0x7f) >> 1;
1356 	for (i = 1, j = 40; i < 46; i++) {
1357 		unsigned int this_str = stream[j] << 7;
1358 		j++;
1359 		if (j > 41)
1360 			j = 0;
1361 		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1362 	}
1363 
1364 	/*
1365 	 * Now compute the hash.  i is the index into hash_str, j is into our
	 * key stream, k is counting the number of bits, and h iterates within
1367 	 * each byte.
1368 	 */
1369 	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1370 		for (h = 0; h < 8 && k < 351; h++, k++) {
1371 			if (int_key[j] & (1 << h)) {
1372 				/*
1373 				 * Key bit is set, XOR in the current 16-bit
1374 				 * string.  Example of processing:
1375 				 *	h = 0,
1376 				 *	tmp = (hash_str[i - 2] & 0 << 16) |
1377 				 *		(hash_str[i - 1] & 0xff << 8) |
1378 				 *		(hash_str[i] & 0xff >> 0)
1379 				 *	So tmp = hash_str[15 + k:k], since the
1380 				 *	i + 2 clause rolls off the 16-bit value
1381 				 *	h = 7,
1382 				 *	tmp = (hash_str[i - 2] & 0x7f << 9) |
1383 				 *		(hash_str[i - 1] & 0xff << 1) |
1384 				 *		(hash_str[i] & 0x80 >> 7)
1385 				 */
1386 				int tmp = (hash_str[i] >> h);
1387 				tmp |= (hash_str[i - 1] << (8 - h));
1388 				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1389 				    << (16 - h);
1390 				hash_result ^= (u16)tmp;
1391 			}
1392 		}
1393 	}
1394 
1395 	return (hash_result);
1396 }
1397 
1398 /*
1399  * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1400  * @input: input stream to modify
1401  * @vlan: the VLAN id to load
1402  */
1403 s32
1404 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1405 {
1406 	DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");
1407 
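	/* ATR byte-stream fields are stored least-significant byte first */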
1408 	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1409 	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1410 
1411 	return (IXGBE_SUCCESS);
1412 }
1413 
1414 /*
1415  * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1416  * @input: input stream to modify
1417  * @src_addr: the IP address to load
1418  */
1419 s32
1420 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1421 {
1422 	DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");
1423 
1424 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1425 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1426 	    (src_addr >> 16) & 0xff;
1427 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1428 	    (src_addr >> 8) & 0xff;
1429 	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1430 
1431 	return (IXGBE_SUCCESS);
1432 }
1433 
1434 /*
1435  * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1436  * @input: input stream to modify
1437  * @dst_addr: the IP address to load
1438  */
1439 s32
1440 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1441 {
1442 	DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");
1443 
1444 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1445 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1446 	    (dst_addr >> 16) & 0xff;
1447 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1448 	    (dst_addr >> 8) & 0xff;
1449 	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1450 
1451 	return (IXGBE_SUCCESS);
1452 }
1453 
1454 /*
1455  * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1456  * @input: input stream to modify
1457  * @src_addr_1: the first 4 bytes of the IP address to load
1458  * @src_addr_2: the second 4 bytes of the IP address to load
1459  * @src_addr_3: the third 4 bytes of the IP address to load
1460  * @src_addr_4: the fourth 4 bytes of the IP address to load
1461  */
1462 s32
1463 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1464     u32 src_addr_1, u32 src_addr_2, u32 src_addr_3, u32 src_addr_4)
1465 {
1466 	DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");
1467 
1468 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1469 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1470 	    (src_addr_4 >> 8) & 0xff;
1471 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1472 	    (src_addr_4 >> 16) & 0xff;
1473 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1474 
1475 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1476 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1477 	    (src_addr_3 >> 8) & 0xff;
1478 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1479 	    (src_addr_3 >> 16) & 0xff;
1480 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1481 
1482 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1483 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1484 	    (src_addr_2 >> 8) & 0xff;
1485 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1486 	    (src_addr_2 >> 16) & 0xff;
1487 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1488 
1489 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1490 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1491 	    (src_addr_1 >> 8) & 0xff;
1492 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1493 	    (src_addr_1 >> 16) & 0xff;
1494 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1495 
1496 	return (IXGBE_SUCCESS);
1497 }
1498 
1499 /*
1500  * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1501  * @input: input stream to modify
1502  * @dst_addr_1: the first 4 bytes of the IP address to load
1503  * @dst_addr_2: the second 4 bytes of the IP address to load
1504  * @dst_addr_3: the third 4 bytes of the IP address to load
1505  * @dst_addr_4: the fourth 4 bytes of the IP address to load
1506  */
1507 s32
1508 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1509     u32 dst_addr_1, u32 dst_addr_2, u32 dst_addr_3, u32 dst_addr_4)
1510 {
1511 	DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");
1512 
1513 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1514 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1515 	    (dst_addr_4 >> 8) & 0xff;
1516 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1517 	    (dst_addr_4 >> 16) & 0xff;
1518 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1519 
1520 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1521 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1522 	    (dst_addr_3 >> 8) & 0xff;
1523 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1524 	    (dst_addr_3 >> 16) & 0xff;
1525 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1526 
1527 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1528 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1529 	    (dst_addr_2 >> 8) & 0xff;
1530 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1531 	    (dst_addr_2 >> 16) & 0xff;
1532 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1533 
1534 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1535 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1536 	    (dst_addr_1 >> 8) & 0xff;
1537 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1538 	    (dst_addr_1 >> 16) & 0xff;
1539 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1540 
1541 	return (IXGBE_SUCCESS);
1542 }
1543 
1544 /*
1545  * ixgbe_atr_set_src_port_82599 - Sets the source port
1546  * @input: input stream to modify
1547  * @src_port: the source port to load
1548  */
1549 s32
1550 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1551 {
1552 	DEBUGFUNC("ixgbe_atr_set_src_port_82599");
1553 
1554 	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1555 	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1556 
1557 	return (IXGBE_SUCCESS);
1558 }
1559 
1560 /*
1561  * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1562  * @input: input stream to modify
1563  * @dst_port: the destination port to load
1564  */
1565 s32
1566 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1567 {
1568 	DEBUGFUNC("ixgbe_atr_set_dst_port_82599");
1569 
1570 	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1571 	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1572 
1573 	return (IXGBE_SUCCESS);
1574 }
1575 
1576 /*
1577  * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1578  * @input: input stream to modify
1579  * @flex_bytes: the flexible bytes to load
1580  */
1581 s32
1582 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1583 {
1584 	DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");
1585 
1586 	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1587 	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1588 
1589 	return (IXGBE_SUCCESS);
1590 }
1591 
1592 /*
1593  * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1594  * @input: input stream to modify
1595  * @vm_pool: the Virtual Machine pool to load
1596  */
1597 s32
1598 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
1599 {
1600 	DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");
1601 
1602 	input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1603 
1604 	return (IXGBE_SUCCESS);
1605 }
1606 
1607 /*
1608  * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1609  * @input: input stream to modify
1610  * @l4type: the layer 4 type value to load
1611  */
1612 s32
1613 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1614 {
1615 	DEBUGFUNC("ixgbe_atr_set_l4type_82599");
1616 
1617 	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1618 
1619 	return (IXGBE_SUCCESS);
1620 }
1621 
1622 /*
1623  * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1624  * @input: input stream to search
1625  * @vlan: the VLAN id to load
1626  */
1627 s32
1628 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1629 {
1630 	DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");
1631 
1632 	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1633 	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1634 
1635 	return (IXGBE_SUCCESS);
1636 }
1637 
1638 /*
1639  * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1640  * @input: input stream to search
1641  * @src_addr: the IP address to load
1642  */
1643 s32
1644 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
1645 {
1646 	DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");
1647 
1648 	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1649 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1650 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1651 	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1652 
1653 	return (IXGBE_SUCCESS);
1654 }
1655 
1656 /*
1657  * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
1658  * @input: input stream to search
1659  * @dst_addr: the IP address to load
1660  */
1661 s32
1662 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
1663 {
1664 	DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599");
1665 
1666 	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1667 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1668 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1669 	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1670 
1671 	return (IXGBE_SUCCESS);
1672 }
1673 
1674 /*
1675  * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1676  * @input: input stream to search
1677  * @src_addr_1: the first 4 bytes of the IP address to load
1678  * @src_addr_2: the second 4 bytes of the IP address to load
1679  * @src_addr_3: the third 4 bytes of the IP address to load
1680  * @src_addr_4: the fourth 4 bytes of the IP address to load
1681  */
1682 s32
1683 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1684     u32 *src_addr_1, u32 *src_addr_2, u32 *src_addr_3, u32 *src_addr_4)
1685 {
1686 	DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599");
1687 
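	/*
	 * The address sits big-endian in the byte stream; each output word
	 * is assembled least-significant-byte first, with src_addr_1 taking
	 * the last four bytes of the address and src_addr_4 the first four.
	 */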
1688 	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1689 	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1690 	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1691 	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1692 
1693 	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1694 	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1695 	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1696 	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1697 
1698 	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1699 	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1700 	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1701 	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1702 
1703 	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1704 	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1705 	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1706 	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1707 
1708 	return (IXGBE_SUCCESS);
1709 }
1710 
1711 /*
1712  * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1713  * @input: input stream to search
1714  * @dst_addr_1: the first 4 bytes of the IP address to load
1715  * @dst_addr_2: the second 4 bytes of the IP address to load
1716  * @dst_addr_3: the third 4 bytes of the IP address to load
1717  * @dst_addr_4: the fourth 4 bytes of the IP address to load
1718  */
1719 s32
1720 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1721     u32 *dst_addr_1, u32 *dst_addr_2, u32 *dst_addr_3, u32 *dst_addr_4)
1722 {
1723 	DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599");
1724 
1725 	*dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
1726 	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
1727 	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
1728 	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1729 
1730 	*dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
1731 	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
1732 	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
1733 	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1734 
1735 	*dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
1736 	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1737 	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1738 	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1739 
1740 	*dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1741 	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1742 	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1743 	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1744 
1745 	return (IXGBE_SUCCESS);
1746 }
1747 
1748 /*
1749  * ixgbe_atr_get_src_port_82599 - Gets the source port
1750  * @input: input stream to search
1751  * @src_port: the source port to load
1752  *
1753  * Even though the input is given in big-endian, the FDIRPORT registers
1754  * expect the ports to be programmed in little-endian.  Hence the need to swap
1755  * endianness when retrieving the data.  This can be confusing since the
1756  * internal hash engine expects it to be big-endian.
1757  */
1758 s32
1759 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
1760 {
1761 	DEBUGFUNC("ixgbe_atr_get_src_port_82599");
1762 
1763 	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
1764 	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
1765 
1766 	return (IXGBE_SUCCESS);
1767 }
1768 
1769 /*
1770  * ixgbe_atr_get_dst_port_82599 - Gets the destination port
1771  * @input: input stream to search
1772  * @dst_port: the destination port to load
1773  *
1774  * Even though the input is given in big-endian, the FDIRPORT registers
1775  * expect the ports to be programmed in little-endian.  Hence the need to swap
1776  * endianness when retrieving the data.  This can be confusing since the
1777  * internal hash engine expects it to be big-endian.
1778  */
1779 s32
1780 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
1781 {
1782 	DEBUGFUNC("ixgbe_atr_get_dst_port_82599");
1783 
1784 	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1785 	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1786 
1787 	return (IXGBE_SUCCESS);
1788 }
1789 
1790 /*
1791  * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
1792  * @input: input stream to search
1793  * @flex_byte: the flexible bytes to load
1794  */
1795 s32
1796 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
1797 {
1798 	DEBUGFUNC("ixgbe_atr_get_flex_byte_82599");
1799 
1800 	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1801 	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1802 
1803 	return (IXGBE_SUCCESS);
1804 }
1805 
1806 /*
1807  * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
1808  * @input: input stream to search
1809  * @vm_pool: the Virtual Machine pool to load
1810  */
1811 s32
1812 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
1813 {
1814 	DEBUGFUNC("ixgbe_atr_get_vm_pool_82599");
1815 
1816 	*vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1817 
1818 	return (IXGBE_SUCCESS);
1819 }
1820 
1821 /*
1822  * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1823  * @input: input stream to search
1824  * @l4type: the layer 4 type value to load
1825  */
1826 s32
1827 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
1828 {
1829 	DEBUGFUNC("ixgbe_atr_get_l4type_82599");
1830 
1831 	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
1832 
1833 	return (IXGBE_SUCCESS);
1834 }
1835 
1836 /*
1837  * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1838  * @hw: pointer to hardware structure
1839  * @input: input bitstream
1840  * @queue: queue index to direct traffic to
1841  */
1842 s32
1843 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1844     struct ixgbe_atr_input *input, u8 queue)
1845 {
1846 	u64  fdirhashcmd;
1847 	u64  fdircmd;
1848 	u32  fdirhash;
1849 	u16  bucket_hash, sig_hash;
1850 	u8   l4type;
1851 
1852 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1853 
1854 	bucket_hash = ixgbe_atr_compute_hash_82599(input,
1855 	    IXGBE_ATR_BUCKET_HASH_KEY);
1856 
1857 	/* bucket_hash is only 15 bits */
1858 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1859 
1860 	sig_hash = ixgbe_atr_compute_hash_82599(input,
1861 	    IXGBE_ATR_SIGNATURE_HASH_KEY);
1862 
1863 	/* Get the l4type in order to program FDIRCMD properly */
1864 	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1865 	(void) ixgbe_atr_get_l4type_82599(input, &l4type);
1866 
1867 	/*
1868 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1869 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1870 	 */
1871 	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1872 
1873 	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1874 	    IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
1875 
1876 	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1877 	case IXGBE_ATR_L4TYPE_TCP:
1878 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1879 		break;
1880 	case IXGBE_ATR_L4TYPE_UDP:
1881 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1882 		break;
1883 	case IXGBE_ATR_L4TYPE_SCTP:
1884 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1885 		break;
1886 	default:
1887 		DEBUGOUT(" Error on l4type input\n");
1888 		return (IXGBE_ERR_CONFIG);
1889 	}
1890 
1891 	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
1892 		fdircmd |= IXGBE_FDIRCMD_IPV6;
1893 
1894 	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
1895 	fdirhashcmd = ((fdircmd << 32) | fdirhash);
1896 
1897 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
1898 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1899 
1900 	return (IXGBE_SUCCESS);
1901 }
1902 
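/*
 * Illustrative usage sketch (not part of the driver): a caller would zero an
 * ixgbe_atr_input structure, populate it with the set routines above, and
 * then install the signature filter.  The port values, queue, and variable
 * names below are hypothetical.
 *
 *	struct ixgbe_atr_input atr_input;
 *
 *	(void) memset(&atr_input, 0, sizeof (atr_input));
 *	(void) ixgbe_atr_set_src_port_82599(&atr_input, 49152);
 *	(void) ixgbe_atr_set_dst_port_82599(&atr_input, 80);
 *	(void) ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
 *	(void) ixgbe_fdir_add_signature_filter_82599(hw, &atr_input, rx_queue);
 */
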
1903 /*
1904  * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1905  * @hw: pointer to hardware structure
1906  * @input: input bitstream
 * @soft_id: software index placed in the FDIRHASH sig/sw-index field
1907  * @queue: queue index to direct traffic to
1908  *
1909  * Note that the caller of this function must take a lock before calling,
1910  * since the hardware writes must be protected from one another.
1911  */
1912 s32
1913 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1914     struct ixgbe_atr_input *input, u16 soft_id, u8 queue)
1915 {
1916 	u32 fdircmd = 0;
1917 	u32 fdirhash;
1918 	u32 src_ipv4, dst_ipv4;
1919 	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
1920 	u16 src_port, dst_port, vlan_id, flex_bytes;
1921 	u16 bucket_hash;
1922 	u8  l4type;
1923 
1924 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1925 
1926 	/* Get our input values */
1927 	(void) ixgbe_atr_get_l4type_82599(input, &l4type);
1928 
1929 	/*
1930 	 * Check l4type formatting, and bail out before we touch the hardware
1931 	 * if there's a configuration issue
1932 	 */
1933 	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1934 	case IXGBE_ATR_L4TYPE_TCP:
1935 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1936 		break;
1937 	case IXGBE_ATR_L4TYPE_UDP:
1938 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1939 		break;
1940 	case IXGBE_ATR_L4TYPE_SCTP:
1941 		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1942 		break;
1943 	default:
1944 		DEBUGOUT(" Error on l4type input\n");
1945 		return (IXGBE_ERR_CONFIG);
1946 	}
1947 
1948 	bucket_hash = ixgbe_atr_compute_hash_82599(input,
1949 	    IXGBE_ATR_BUCKET_HASH_KEY);
1950 
1951 	/* bucket_hash is only 15 bits */
1952 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1953 
1954 	(void) ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
1955 	(void) ixgbe_atr_get_src_port_82599(input, &src_port);
1956 	(void) ixgbe_atr_get_dst_port_82599(input, &dst_port);
1957 	(void) ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
1958 
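	/*
	 * For a perfect filter the FDIRHASH sig/sw-index field carries the
	 * caller-supplied soft_id instead of a signature hash.
	 */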
1959 	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1960 
1961 	/* Now figure out if we're IPv4 or IPv6 */
1962 	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
1963 		/* IPv6 */
1964 		(void) ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1,
1965 		    &src_ipv6_2, &src_ipv6_3, &src_ipv6_4);
1966 
1967 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
1968 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
1969 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
1970 		/* The last 4 bytes are written to the same register as IPv4 */
1971 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
1972 
1973 		fdircmd |= IXGBE_FDIRCMD_IPV6;
1974 		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
1975 	} else {
1976 		/* IPv4 */
1977 		(void) ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
1978 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
1980 	}
1981 
1982 	(void) ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
1983 	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
1984 
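	/*
	 * Program the VLAN/flex-byte and port match registers.  The ports
	 * were byte-swapped by the get routines above, so they are already
	 * in the little-endian order that FDIRPORT expects.
	 */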
1985 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
1986 	    (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
1987 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
1988 	    (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1989 
1990 	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
1991 	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
1992 	fdircmd |= IXGBE_FDIRCMD_LAST;
1993 	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
1994 	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1995 
1996 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1997 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1998 
1999 	return (IXGBE_SUCCESS);
2000 }
2001 
2002 /*
2003  * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2004  * @hw: pointer to hardware structure
2005  * @reg: analog register to read
2006  * @val: read value
2007  *
2008  * Performs read operation to Omer analog register specified.
2009  */
2010 s32
2011 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2012 {
2013 	u32  core_ctl;
2014 
2015 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2016 
2017 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2018 	    (reg << 8));
2019 	IXGBE_WRITE_FLUSH(hw);
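	/* give the analog interface time to complete the access */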
2020 	usec_delay(10);
2021 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2022 	*val = (u8)core_ctl;
2023 
2024 	return (IXGBE_SUCCESS);
2025 }
2026 
2027 /*
2028  * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2029  * @hw: pointer to hardware structure
2030  * @reg: atlas register to write
2031  * @val: value to write
2032  *
2033  * Performs write operation to Omer analog register specified.
2034  */
2035 s32
2036 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2037 {
2038 	u32  core_ctl;
2039 
2040 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2041 
2042 	core_ctl = (reg << 8) | val;
2043 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2044 	IXGBE_WRITE_FLUSH(hw);
2045 	usec_delay(10);
2046 
2047 	return (IXGBE_SUCCESS);
2048 }
2049 
2050 /*
2051  * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
2052  * @hw: pointer to hardware structure
2053  *
2054  * Starts the hardware using the generic start_hw function, then performs
2055  * the revision-specific operations: clearing the rate limiter registers,
2056  * flagging link autotry to restart, and verifying the firmware version.
2057  */
2058 s32
2059 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
2060 {
2061 	u32 i;
2062 	s32 ret_val = IXGBE_SUCCESS;
2063 
2064 	DEBUGFUNC("ixgbe_start_hw_rev_1_82599");
2065 
2066 	ret_val = ixgbe_start_hw_generic(hw);
2067 
2068 	/* Clear the rate limiters */
2069 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
2070 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
2071 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
2072 	}
2073 	IXGBE_WRITE_FLUSH(hw);
2074 
2075 	/* We need to run link autotry after the driver loads */
2076 	hw->mac.autotry_restart = true;
2077 
2078 	if (ret_val == IXGBE_SUCCESS)
2079 		ret_val = ixgbe_verify_fw_version_82599(hw);
2080 
2081 	return (ret_val);
2082 }
2083 
2084 /*
2085  * ixgbe_identify_phy_82599 - Get physical layer module
2086  * @hw: pointer to hardware structure
2087  *
2088  * Determines the physical layer module found on the current adapter.
2089  * If PHY already detected, maintains current PHY type in hw struct,
2090  * otherwise executes the PHY detection routine.
2091  */
2092 s32
2093 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2094 {
2095 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2096 
2097 	DEBUGFUNC("ixgbe_identify_phy_82599");
2098 
2099 	/* Detect PHY if not unknown - returns success if already detected. */
2100 	status = ixgbe_identify_phy_generic(hw);
2101 	if (status != IXGBE_SUCCESS)
2102 		status = ixgbe_identify_sfp_module_generic(hw);
2103 	/* Set PHY type none if no PHY detected */
2104 	if (hw->phy.type == ixgbe_phy_unknown) {
2105 		hw->phy.type = ixgbe_phy_none;
2106 		status = IXGBE_SUCCESS;
2107 	}
2108 
2109 	/* Return error if SFP module has been detected but is not supported */
2110 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2111 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2112 
2113 	return (status);
2114 }
2115 
2116 /*
2117  * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2118  * @hw: pointer to hardware structure
2119  *
2120  * Determines physical layer capabilities of the current configuration.
2121  */
2122 u32
2123 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2124 {
2125 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2126 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2127 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2128 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2129 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2130 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2131 	u16 ext_ability = 0;
2132 	u8 comp_codes_10g = 0;
2133 
2134 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2135 
2136 	hw->phy.ops.identify(hw);
2137 
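	/*
	 * Copper PHYs report their abilities through the MDIO PMA/PMD
	 * extended-ability register rather than through AUTOC.
	 */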
2138 	if (hw->phy.type == ixgbe_phy_tn ||
2139 	    hw->phy.type == ixgbe_phy_aq ||
2140 	    hw->phy.type == ixgbe_phy_cu_unknown) {
2141 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2142 		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2143 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2144 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2145 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2146 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2147 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2148 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2149 		goto out;
2150 	}
2151 
2152 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2153 	case IXGBE_AUTOC_LMS_1G_AN:
2154 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2155 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2156 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2157 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2158 			goto out;
2159 		} else {
2160 			/* SFI mode so read SFP module */
2161 			goto sfp_check;
2162 		}
2163 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2164 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2165 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2166 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2167 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2168 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2169 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2170 		goto out;
2171 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2172 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2173 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2174 			goto out;
2175 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2176 			goto sfp_check;
2177 		break;
2178 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2179 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2180 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2181 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2182 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2183 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2184 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2185 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2186 		goto out;
2187 	default:
2188 		goto out;
2189 	}
2190 
2191 sfp_check:
2192 	/*
2193 	 * SFP check must be done last since DA modules are sometimes used to
2194 	 * test KR mode - KR must be identified correctly before the SFP check.
2195 	 * Call identify_sfp because the pluggable module may have changed.
2196 	 */
2197 	hw->phy.ops.identify_sfp(hw);
2198 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2199 		goto out;
2200 
2201 	switch (hw->phy.type) {
2202 	case ixgbe_phy_tw_tyco:
2203 	case ixgbe_phy_tw_unknown:
2204 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2205 		break;
2206 	case ixgbe_phy_sfp_avago:
2207 	case ixgbe_phy_sfp_ftl:
2208 	case ixgbe_phy_sfp_intel:
2209 	case ixgbe_phy_sfp_unknown:
2210 		hw->phy.ops.read_i2c_eeprom(hw,
2211 		    IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2212 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2213 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2214 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2215 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2216 		break;
2217 	default:
2218 		break;
2219 	}
2220 
2221 out:
2222 	return (physical_layer);
2223 }
2224 
2225 /*
2226  * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2227  * @hw: pointer to hardware structure
2228  * @regval: register value to write to RXCTRL
2229  *
2230  * Enables the Rx DMA unit for 82599
2231  */
2232 s32
2233 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2234 {
2235 #define	IXGBE_MAX_SECRX_POLL	30
2236 	int i;
2237 	int secrxreg;
2238 
2239 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2240 
2241 	/*
2242 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2243 	 * If traffic is incoming before we enable the Rx unit, it could hang
2244 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2245 	 * completely disabled prior to enabling the Rx unit.
2246 	 */
2247 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2248 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2249 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
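	/*
	 * Wait up to IXGBE_MAX_SECRX_POLL * 10us for the security engine to
	 * report that it is ready (fully disabled).
	 */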
2250 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2251 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2252 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2253 			break;
2254 		else
2255 			/* Use interrupt-safe sleep just in case */
2256 			usec_delay(10);
2257 	}
2258 
2259 	/* For informational purposes only */
2260 	if (i >= IXGBE_MAX_SECRX_POLL)
2261 		DEBUGOUT("Rx unit being enabled before security "
2262 		    "path fully disabled.  Continuing with init.\n");
2263 
2264 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2265 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2266 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2267 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2268 	IXGBE_WRITE_FLUSH(hw);
2269 
2270 	return (IXGBE_SUCCESS);
2271 }
2272 
2273 /*
2274  * ixgbe_get_device_caps_82599 - Get additional device capabilities
2275  * @hw: pointer to hardware structure
2276  * @device_caps: the EEPROM word with the extra device capabilities
2277  *
2278  * This function will read the EEPROM location for the device capabilities,
2279  * and return the word through device_caps.
2280  */
2281 s32
2282 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2283 {
2284 	DEBUGFUNC("ixgbe_get_device_caps_82599");
2285 
2286 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
2287 
2288 	return (IXGBE_SUCCESS);
2289 }
2290 
2291 /*
2292  * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2293  * @hw: pointer to hardware structure
2294  *
2295  * Verifies that the installed firmware version is 0.6 or higher
2296  * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2297  *
2298  * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2299  * if the FW version is not supported.
2300  */
2301 static s32
2302 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2303 {
2304 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2305 	u16 fw_offset, fw_ptp_cfg_offset;
2306 	u16 fw_version = 0;
2307 
2308 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2309 
2310 	/* firmware check is only necessary for SFI devices */
2311 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2312 		status = IXGBE_SUCCESS;
2313 		goto fw_version_out;
2314 	}
2315 
2316 	/* get the offset to the Firmware Module block */
2317 	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2318 
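	/* a pointer of 0 or 0xFFFF means the firmware module is not present */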
2319 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2320 		goto fw_version_out;
2321 
2322 	/* get the offset to the Pass Through Patch Configuration block */
2323 	hw->eeprom.ops.read(hw, (fw_offset +
2324 	    IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), &fw_ptp_cfg_offset);
2325 
2326 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2327 		goto fw_version_out;
2328 
2329 	/* get the firmware version */
2330 	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4),
2331 	    &fw_version);
2332 
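	/* a patch version greater than 0x5 corresponds to firmware 0.6 or later */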
2333 	if (fw_version > 0x5)
2334 		status = IXGBE_SUCCESS;
2335 
2336 fw_version_out:
2337 	return (status);
2338 }
2339