xref: /titanic_41/usr/src/uts/common/io/ixgbe/ixgbe_82598.c (revision 45e662eb8429b38c18931ebeed30f2e5287ae51b)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2010, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
39 
40 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
41 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
42 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
43                                              ixgbe_link_speed *speed,
44                                              bool *autoneg);
45 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
46 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
47 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
48 					bool autoneg_wait_to_complete);
49 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
50                                       ixgbe_link_speed *speed, bool *link_up,
51                                       bool link_up_wait_to_complete);
52 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
53                                             ixgbe_link_speed speed,
54                                             bool autoneg,
55                                             bool autoneg_wait_to_complete);
56 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
57                                                ixgbe_link_speed speed,
58                                                bool autoneg,
59                                                bool autoneg_wait_to_complete);
60 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
61 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
62 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
63 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
64 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
65 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
66                          u32 vind, bool vlan_on);
67 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
68 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
69 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
70 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
71                                 u8 *eeprom_data);
72 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
73 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
74 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
75 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
76 
77 /**
78  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
79  *  @hw: pointer to the HW structure
80  *
81  *  The defaults for 82598 should be in the range of 50us to 50ms,
82  *  however the hardware default for these parts is 500us to 1ms which is less
83  *  than the 10ms recommended by the pci-e spec.  To address this we need to
84  *  increase the value to either 10ms to 250ms for capability version 1 config,
85  *  or 16ms to 55ms for version 2.
86  **/
87 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
88 {
89 	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
90 	u16 pcie_devctl2;
91 
92 	/* only take action if timeout value is defaulted to 0 */
93 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
94 		goto out;
95 
96 	/*
97 	 * if capababilities version is type 1 we can write the
98 	 * timeout of 10ms to 250ms through the GCR register
99 	 */
100 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
101 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
102 		goto out;
103 	}
104 
105 	/*
106 	 * for version 2 capabilities we need to write the config space
107 	 * directly in order to set the completion timeout value for
108 	 * 16ms to 55ms
109 	 */
110 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
111 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
112 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
113 out:
114 	/* disable completion timeout resend */
115 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
116 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
117 }
118 
119 /**
120  *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
121  *  @hw: pointer to hardware structure
122  *
123  *  Read PCIe configuration space, and get the MSI-X vector count from
124  *  the capabilities table.
125  **/
126 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
127 {
128 	u32 msix_count = 18;
129 
130 	DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
131 
132 	if (hw->mac.msix_vectors_from_pcie) {
133 		msix_count = IXGBE_READ_PCIE_WORD(hw,
134 		                                  IXGBE_PCIE_MSIX_82598_CAPS);
135 		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
136 
137 		/* MSI-X count is zero-based in HW, so increment to give
138 		 * proper value */
139 		msix_count++;
140 	}
141 	return msix_count;
142 }
143 
144 /**
145  *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
146  *  @hw: pointer to hardware structure
147  *
148  *  Initialize the function pointers and assign the MAC type for 82598.
149  *  Does not touch the hardware.
150  **/
151 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
152 {
153 	struct ixgbe_mac_info *mac = &hw->mac;
154 	struct ixgbe_phy_info *phy = &hw->phy;
155 	s32 ret_val;
156 
157 	DEBUGFUNC("ixgbe_init_ops_82598");
158 
159 	ret_val = ixgbe_init_phy_ops_generic(hw);
160 	ret_val = ixgbe_init_ops_generic(hw);
161 
162 	/* PHY */
163 	phy->ops.init = &ixgbe_init_phy_ops_82598;
164 
165 	/* MAC */
166 	mac->ops.start_hw = &ixgbe_start_hw_82598;
167 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
168 	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
169 	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
170 	mac->ops.get_supported_physical_layer =
171 	                            &ixgbe_get_supported_physical_layer_82598;
172 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
173 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
174 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
175 
176 	/* RAR, Multicast, VLAN */
177 	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
178 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
179 	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
180 	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
181 
182 	/* Flow Control */
183 	mac->ops.fc_enable = &ixgbe_fc_enable_82598;
184 
185 	mac->mcft_size       = 128;
186 	mac->vft_size        = 128;
187 	mac->num_rar_entries = 16;
188 	mac->rx_pb_size      = 512;
189 	mac->max_tx_queues   = 32;
190 	mac->max_rx_queues   = 64;
191 	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
192 
193 	/* SFP+ Module */
194 	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
195 
196 	/* Link */
197 	mac->ops.check_link = &ixgbe_check_mac_link_82598;
198 	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
199 	mac->ops.flap_tx_laser = NULL;
200 	mac->ops.get_link_capabilities =
201 	                       &ixgbe_get_link_capabilities_82598;
202 
203 	return ret_val;
204 }
205 
/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 *  Returns IXGBE_SUCCESS on success, or IXGBE_ERR_SFP_NOT_SUPPORTED when an
 *  NL PHY has an unknown SFP+ module or no EEPROM init sequence for it.
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY; fills in hw->phy.type used by the switch below */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
		                  &ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHYs use dedicated link setup/check/firmware routines */
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_aq:
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_generic;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/*
		 * Check to see if SFP+ module is supported: a module with no
		 * EEPROM init sequence offsets is treated as unsupported.
		 */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
		                                            &list_offset,
		                                            &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		/* Other PHY types keep the generic ops installed earlier */
		break;
	}

out:
	return ret_val;
}
273 
274 /**
275  *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
276  *  @hw: pointer to hardware structure
277  *
278  *  Starts the hardware using the generic start_hw function.
279  *  Disables relaxed ordering Then set pcie completion timeout
280  *
281  **/
282 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
283 {
284 	u32 regval;
285 	u32 i;
286 	s32 ret_val = IXGBE_SUCCESS;
287 
288 	DEBUGFUNC("ixgbe_start_hw_82598");
289 
290 	ret_val = ixgbe_start_hw_generic(hw);
291 
292 	/* Disable relaxed ordering */
293 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
294 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
295 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
296 		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
297 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
298 	}
299 
300 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
301 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
302 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
303 		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
304 		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
305 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
306 	}
307 
308 	/* set the completion timeout for interface */
309 	if (ret_val == IXGBE_SUCCESS)
310 		ixgbe_set_pcie_completion_timeout(hw);
311 
312 	return ret_val;
313 }
314 
315 /**
316  *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
317  *  @hw: pointer to hardware structure
318  *  @speed: pointer to link speed
319  *  @autoneg: boolean auto-negotiation value
320  *
321  *  Determines the link capabilities by reading the AUTOC register.
322  **/
323 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
324                                              ixgbe_link_speed *speed,
325                                              bool *autoneg)
326 {
327 	s32 status = IXGBE_SUCCESS;
328 	u32 autoc = 0;
329 
330 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
331 
332 	/*
333 	 * Determine link capabilities based on the stored value of AUTOC,
334 	 * which represents EEPROM defaults.  If AUTOC value has not been
335 	 * stored, use the current register value.
336 	 */
337 	if (hw->mac.orig_link_settings_stored)
338 		autoc = hw->mac.orig_autoc;
339 	else
340 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
341 
342 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
343 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
344 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
345 		*autoneg = FALSE;
346 		break;
347 
348 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
349 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
350 		*autoneg = FALSE;
351 		break;
352 
353 	case IXGBE_AUTOC_LMS_1G_AN:
354 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
355 		*autoneg = TRUE;
356 		break;
357 
358 	case IXGBE_AUTOC_LMS_KX4_AN:
359 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
360 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
361 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
362 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
363 		if (autoc & IXGBE_AUTOC_KX_SUPP)
364 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
365 		*autoneg = TRUE;
366 		break;
367 
368 	default:
369 		status = IXGBE_ERR_LINK_SETUP;
370 		break;
371 	}
372 
373 	return status;
374 }
375 
376 /**
377  *  ixgbe_get_media_type_82598 - Determines media type
378  *  @hw: pointer to hardware structure
379  *
380  *  Returns the media type (fiber, copper, backplane)
381  **/
382 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
383 {
384 	enum ixgbe_media_type media_type;
385 
386 	DEBUGFUNC("ixgbe_get_media_type_82598");
387 
388 	/* Detect if there is a copper PHY attached. */
389 	switch (hw->phy.type) {
390 	case ixgbe_phy_cu_unknown:
391 	case ixgbe_phy_tn:
392 	case ixgbe_phy_aq:
393 		media_type = ixgbe_media_type_copper;
394 		goto out;
395 	default:
396 		break;
397 	}
398 
399 	/* Media type for I82598 is based on device ID */
400 	switch (hw->device_id) {
401 	case IXGBE_DEV_ID_82598:
402 	case IXGBE_DEV_ID_82598_BX:
403 		/* Default device ID is mezzanine card KX/KX4 */
404 		media_type = ixgbe_media_type_backplane;
405 		break;
406 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
407 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
408 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
409 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
410 	case IXGBE_DEV_ID_82598EB_XF_LR:
411 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
412 		media_type = ixgbe_media_type_fiber;
413 		break;
414 	case IXGBE_DEV_ID_82598EB_CX4:
415 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
416 		media_type = ixgbe_media_type_cx4;
417 		break;
418 	case IXGBE_DEV_ID_82598AT:
419 	case IXGBE_DEV_ID_82598AT2:
420 		media_type = ixgbe_media_type_copper;
421 		break;
422 	default:
423 		media_type = ixgbe_media_type_unknown;
424 		break;
425 	}
426 out:
427 	return media_type;
428 }
429 
/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *  @packetbuf_num: packet buffer number (0-7)
 *
 *  Enable flow control according to the current settings.
 *
 *  Programs FCTRL (Rx pause enable) and RMCS (Tx pause enable) based on
 *  hw->fc.current_mode, then sets the Rx high/low water marks, pause time,
 *  and refresh timer for the given packet buffer.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 rx_pba_size;
	u32 link_speed = 0;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		/* Strip the Rx-pause half out of the requested mode */
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
		goto out;

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *     we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		/* Water marks are offsets (in KB units) from the buffer top */
		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

		reg = (rx_pba_size - hw->fc.low_water) << 6;
		if (hw->fc.send_xon)
			reg |= IXGBE_FCRTL_XONE;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);

		reg = (rx_pba_size - hw->fc.high_water) << 6;
		reg |= IXGBE_FCRTH_FCEN;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
	}

	/* Configure pause time (2 TCs per register) */
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

	/* Refresh timer is half the pause time */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}
562 
/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: TRUE to poll for autoneg completion
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 *
 *  Returns IXGBE_ERR_AUTONEG_NOT_COMPLETE if polling was requested in a
 *  KX4 autoneg link mode and completion was not seen in time.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link by setting AN restart in AUTOC */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		/* Polling only applies to the KX4 autoneg link modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS in 100ms steps for AN completion */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return status;
}
610 
611 /**
612  *  ixgbe_validate_link_ready - Function looks for phy link
613  *  @hw: pointer to hardware structure
614  *
615  *  Function indicates success when phy link is available. If phy is not ready
616  *  within 5 seconds of MAC indicating link, the function returns error.
617  **/
618 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
619 {
620 	u32 timeout;
621 	u16 an_reg;
622 
623 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
624 		return IXGBE_SUCCESS;
625 
626 	for (timeout = 0;
627 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
628 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
629 		                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
630 
631 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
632 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
633 			break;
634 
635 		msec_delay(100);
636 	}
637 
638 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
639 		DEBUGOUT("Link was indicated but link is down\n");
640 		return IXGBE_ERR_LINK_SETUP;
641 	}
642 
643 	return IXGBE_SUCCESS;
644 }
645 
/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE is link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed.
 *  For NL PHYs the SERDES PHY link state is checked first; for 82598AT2 the
 *  PHY is additionally validated via ixgbe_validate_link_ready().
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/* 0xC79F is read twice; the second read is the one used */
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
		                     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* Re-check both registers in 100ms steps */
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
				                     IXGBE_TWINAX_DEV,
				                     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
				                     IXGBE_TWINAX_DEV,
				                     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		/* PHY link down means MAC link state is irrelevant */
		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		/* Poll the MAC LINKS register in 100ms steps */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* On 82598AT2 the MAC may report link before the PHY is ready */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

	/* if link is down, zero out the current_mode */
	if (*link_up == FALSE) {
		hw->fc.current_mode = ixgbe_fc_none;
		hw->fc.fc_was_autonegged = FALSE;
	}
out:
	return IXGBE_SUCCESS;
}
740 
741 /**
742  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
743  *  @hw: pointer to hardware structure
744  *  @speed: new link speed
745  *  @autoneg: TRUE if autonegotiation enabled
746  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
747  *
748  *  Set the link speed in the AUTOC register and restarts link.
749  **/
750 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
751                                            ixgbe_link_speed speed, bool autoneg,
752                                            bool autoneg_wait_to_complete)
753 {
754 	s32              status            = IXGBE_SUCCESS;
755 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
756 	u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
757 	u32              autoc             = curr_autoc;
758 	u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;
759 
760 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
761 
762 	/* Check to see if speed passed in is supported. */
763 	(void) ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
764 	speed &= link_capabilities;
765 
766 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
767 		status = IXGBE_ERR_LINK_SETUP;
768 
769 	/* Set KX4/KX support according to speed requested */
770 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
771 	         link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
772 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
773 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
774 			autoc |= IXGBE_AUTOC_KX4_SUPP;
775 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
776 			autoc |= IXGBE_AUTOC_KX_SUPP;
777 		if (autoc != curr_autoc)
778 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
779 	}
780 
781 	if (status == IXGBE_SUCCESS) {
782 		/*
783 		 * Setup and restart the link based on the new values in
784 		 * ixgbe_hw This will write the AUTOC register based on the new
785 		 * stored values
786 		 */
787 		status = ixgbe_start_mac_link_82598(hw,
788 		                                    autoneg_wait_to_complete);
789 	}
790 
791 	return status;
792 }
793 
794 
795 /**
796  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
797  *  @hw: pointer to hardware structure
798  *  @speed: new link speed
799  *  @autoneg: TRUE if autonegotiation enabled
800  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
801  *
802  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
803  **/
804 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
805                                                ixgbe_link_speed speed,
806                                                bool autoneg,
807                                                bool autoneg_wait_to_complete)
808 {
809 	s32 status;
810 
811 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
812 
813 	/* Setup the PHY according to input speed */
814 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
815 	                                      autoneg_wait_to_complete);
816 	/* Set up MAC */
817 	(void) ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
818 
819 	return status;
820 }
821 
822 /**
823  *  ixgbe_reset_hw_82598 - Performs hardware reset
824  *  @hw: pointer to hardware structure
825  *
826  *  Resets the hardware by resetting the transmit and receive units, masks and
827  *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
828  *  reset.
829  **/
830 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
831 {
832 	s32 status = IXGBE_SUCCESS;
833 	s32 phy_status = IXGBE_SUCCESS;
834 	u32 ctrl;
835 	u32 gheccr;
836 	u32 i;
837 	u32 autoc;
838 	u8  analog_val;
839 
840 	DEBUGFUNC("ixgbe_reset_hw_82598");
841 
842 	/* Call adapter stop to disable tx/rx and clear interrupts */
843 	hw->mac.ops.stop_adapter(hw);
844 
845 	/*
846 	 * Power up the Atlas Tx lanes if they are currently powered down.
847 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
848 	 * they are not automatically restored on reset.
849 	 */
850 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
851 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
852 		/* Enable Tx Atlas so packets can be transmitted again */
853 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
854 		                             &analog_val);
855 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
856 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
857 		                              analog_val);
858 
859 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
860 		                             &analog_val);
861 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
862 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
863 		                              analog_val);
864 
865 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
866 		                             &analog_val);
867 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
868 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
869 		                              analog_val);
870 
871 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
872 		                             &analog_val);
873 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
874 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
875 		                              analog_val);
876 	}
877 
878 	/* Reset PHY */
879 	if (hw->phy.reset_disable == FALSE) {
880 		/* PHY ops must be identified and initialized prior to reset */
881 
882 		/* Init PHY and function pointers, perform SFP setup */
883 		phy_status = hw->phy.ops.init(hw);
884 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
885 			goto reset_hw_out;
886 		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
887 			goto no_phy_reset;
888 
889 		hw->phy.ops.reset(hw);
890 	}
891 
892 no_phy_reset:
893 	/*
894 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
895 	 * access and verify no pending requests before reset
896 	 */
897 	(void) ixgbe_disable_pcie_master(hw);
898 
899 mac_reset_top:
900 	/*
901 	 * Issue global reset to the MAC.  This needs to be a SW reset.
902 	 * If link reset is used, it might reset the MAC when mng is using it
903 	 */
904 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
905 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
906 	IXGBE_WRITE_FLUSH(hw);
907 
908 	/* Poll for reset bit to self-clear indicating reset is complete */
909 	for (i = 0; i < 10; i++) {
910 		usec_delay(1);
911 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
912 		if (!(ctrl & IXGBE_CTRL_RST))
913 			break;
914 	}
915 	if (ctrl & IXGBE_CTRL_RST) {
916 		status = IXGBE_ERR_RESET_FAILED;
917 		DEBUGOUT("Reset polling failed to complete.\n");
918 	}
919 
920 	/*
921 	 * Double resets are required for recovery from certain error
922 	 * conditions.  Between resets, it is necessary to stall to allow time
923 	 * for any pending HW events to complete.  We use 1usec since that is
924 	 * what is needed for ixgbe_disable_pcie_master().  The second reset
925 	 * then clears out any effects of those events.
926 	 */
927 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
928 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
929 		usec_delay(1);
930 		goto mac_reset_top;
931 	}
932 
933 	msec_delay(50);
934 
935 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
936 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
937 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
938 
939 	/*
940 	 * Store the original AUTOC value if it has not been
941 	 * stored off yet.  Otherwise restore the stored original
942 	 * AUTOC value since the reset operation sets back to deaults.
943 	 */
944 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
945 	if (hw->mac.orig_link_settings_stored == FALSE) {
946 		hw->mac.orig_autoc = autoc;
947 		hw->mac.orig_link_settings_stored = TRUE;
948 	} else if (autoc != hw->mac.orig_autoc) {
949 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
950 	}
951 
952 	/* Store the permanent mac address */
953 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
954 
955 	/*
956 	 * Store MAC address from RAR0, clear receive address registers, and
957 	 * clear the multicast table
958 	 */
959 	hw->mac.ops.init_rx_addrs(hw);
960 
961 reset_hw_out:
962 	if (phy_status != IXGBE_SUCCESS)
963 		status = phy_status;
964 
965 	return status;
966 }
967 
968 /**
969  *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
970  *  @hw: pointer to hardware struct
971  *  @rar: receive address register index to associate with a VMDq index
972  *  @vmdq: VMDq set index
973  **/
974 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
975 {
976 	u32 rar_high;
977 	u32 rar_entries = hw->mac.num_rar_entries;
978 
979 	DEBUGFUNC("ixgbe_set_vmdq_82598");
980 
981 	/* Make sure we are using a valid rar index range */
982 	if (rar >= rar_entries) {
983 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
984 		return IXGBE_ERR_INVALID_ARGUMENT;
985 	}
986 
987 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
988 	rar_high &= ~IXGBE_RAH_VIND_MASK;
989 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
990 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
991 	return IXGBE_SUCCESS;
992 }
993 
994 /**
995  *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
996  *  @hw: pointer to hardware struct
997  *  @rar: receive address register index to associate with a VMDq index
998  *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
999  **/
1000 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
1001 {
1002 	u32 rar_high;
1003 	u32 rar_entries = hw->mac.num_rar_entries;
1004 
1005 	UNREFERENCED_PARAMETER(vmdq);
1006 
1007 	/* Make sure we are using a valid rar index range */
1008 	if (rar >= rar_entries) {
1009 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
1010 		return IXGBE_ERR_INVALID_ARGUMENT;
1011 	}
1012 
1013 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
1014 	if (rar_high & IXGBE_RAH_VIND_MASK) {
1015 		rar_high &= ~IXGBE_RAH_VIND_MASK;
1016 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
1017 	}
1018 
1019 	return IXGBE_SUCCESS;
1020 }
1021 
1022 /**
1023  *  ixgbe_set_vfta_82598 - Set VLAN filter table
1024  *  @hw: pointer to hardware structure
1025  *  @vlan: VLAN id to write to VLAN filter
1026  *  @vind: VMDq output index that maps queue to VLAN id in VFTA
1027  *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
1028  *
1029  *  Turn on/off specified VLAN in the VLAN filter table.
1030  **/
1031 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1032 	                                              bool vlan_on)
1033 {
1034 	u32 regindex;
1035 	u32 bitindex;
1036 	u32 bits;
1037 	u32 vftabyte;
1038 
1039 	DEBUGFUNC("ixgbe_set_vfta_82598");
1040 
1041 	if (vlan > 4095)
1042 		return IXGBE_ERR_PARAM;
1043 
1044 	/* Determine 32-bit word position in array */
1045 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
1046 
1047 	/* Determine the location of the (VMD) queue index */
1048 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1049 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
1050 
1051 	/* Set the nibble for VMD queue index */
1052 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1053 	bits &= (~(0x0F << bitindex));
1054 	bits |= (vind << bitindex);
1055 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1056 
1057 	/* Determine the location of the bit for this VLAN id */
1058 	bitindex = vlan & 0x1F;   /* lower five bits */
1059 
1060 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1061 	if (vlan_on)
1062 		/* Turn on this VLAN id */
1063 		bits |= (1 << bitindex);
1064 	else
1065 		/* Turn off this VLAN id */
1066 		bits &= ~(1 << bitindex);
1067 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1068 
1069 	return IXGBE_SUCCESS;
1070 }
1071 
1072 /**
1073  *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
1074  *  @hw: pointer to hardware structure
1075  *
1076  *  Clears the VLAN filer table, and the VMDq index associated with the filter
1077  **/
1078 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1079 {
1080 	u32 offset;
1081 	u32 vlanbyte;
1082 
1083 	DEBUGFUNC("ixgbe_clear_vfta_82598");
1084 
1085 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1086 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1087 
1088 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1089 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1090 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1091 			                0);
1092 
1093 	return IXGBE_SUCCESS;
1094 }
1095 
1096 /**
1097  *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1098  *  @hw: pointer to hardware structure
1099  *  @reg: analog register to read
1100  *  @val: read value
1101  *
1102  *  Performs read operation to Atlas analog register specified.
1103  **/
1104 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1105 {
1106 	u32  atlas_ctl;
1107 
1108 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1109 
1110 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1111 	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1112 	IXGBE_WRITE_FLUSH(hw);
1113 	usec_delay(10);
1114 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1115 	*val = (u8)atlas_ctl;
1116 
1117 	return IXGBE_SUCCESS;
1118 }
1119 
1120 /**
1121  *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1122  *  @hw: pointer to hardware structure
1123  *  @reg: atlas register to write
1124  *  @val: value to write
1125  *
1126  *  Performs write operation to Atlas analog register specified.
1127  **/
1128 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1129 {
1130 	u32  atlas_ctl;
1131 
1132 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1133 
1134 	atlas_ctl = (reg << 8) | val;
1135 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1136 	IXGBE_WRITE_FLUSH(hw);
1137 	usec_delay(10);
1138 
1139 	return IXGBE_SUCCESS;
1140 }
1141 
1142 /**
1143  *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1144  *  @hw: pointer to hardware structure
1145  *  @byte_offset: EEPROM byte offset to read
1146  *  @eeprom_data: value read
1147  *
1148  *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1149  **/
1150 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1151                                 u8 *eeprom_data)
1152 {
1153 	s32 status = IXGBE_SUCCESS;
1154 	u16 sfp_addr = 0;
1155 	u16 sfp_data = 0;
1156 	u16 sfp_stat = 0;
1157 	u32 i;
1158 
1159 	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1160 
1161 	if (hw->phy.type == ixgbe_phy_nl) {
1162 		/*
1163 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1164 		 * 0xC30D. These registers are used to talk to the SFP+
1165 		 * module's EEPROM through the SDA/SCL (I2C) interface.
1166 		 */
1167 		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1168 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1169 		hw->phy.ops.write_reg(hw,
1170 		                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1171 		                      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1172 		                      sfp_addr);
1173 
1174 		/* Poll status */
1175 		for (i = 0; i < 100; i++) {
1176 			hw->phy.ops.read_reg(hw,
1177 			                     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1178 			                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1179 			                     &sfp_stat);
1180 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1181 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1182 				break;
1183 			msec_delay(10);
1184 		}
1185 
1186 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1187 			DEBUGOUT("EEPROM read did not pass.\n");
1188 			status = IXGBE_ERR_SFP_NOT_PRESENT;
1189 			goto out;
1190 		}
1191 
1192 		/* Read data */
1193 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1194 		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1195 
1196 		*eeprom_data = (u8)(sfp_data >> 8);
1197 	} else {
1198 		status = IXGBE_ERR_PHY;
1199 		goto out;
1200 	}
1201 
1202 out:
1203 	return status;
1204 }
1205 
1206 /**
1207  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1208  *  @hw: pointer to hardware structure
1209  *
1210  *  Determines physical layer capabilities of the current configuration.
1211  **/
1212 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1213 {
1214 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1215 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1216 	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1217 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1218 	u16 ext_ability = 0;
1219 
1220 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1221 
1222 	hw->phy.ops.identify(hw);
1223 
1224 	/* Copper PHY must be checked before AUTOC LMS to determine correct
1225 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1226 	switch (hw->phy.type) {
1227 	case ixgbe_phy_tn:
1228 	case ixgbe_phy_aq:
1229 	case ixgbe_phy_cu_unknown:
1230 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1231 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1232 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1233 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1234 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1235 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1236 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1237 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1238 		goto out;
1239 	default:
1240 		break;
1241 	}
1242 
1243 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1244 	case IXGBE_AUTOC_LMS_1G_AN:
1245 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1246 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1247 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1248 		else
1249 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1250 		break;
1251 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1252 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1253 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1254 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1255 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1256 		else /* XAUI */
1257 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1258 		break;
1259 	case IXGBE_AUTOC_LMS_KX4_AN:
1260 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1261 		if (autoc & IXGBE_AUTOC_KX_SUPP)
1262 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1263 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
1264 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1265 		break;
1266 	default:
1267 		break;
1268 	}
1269 
1270 	if (hw->phy.type == ixgbe_phy_nl) {
1271 		hw->phy.ops.identify_sfp(hw);
1272 
1273 		switch (hw->phy.sfp_type) {
1274 		case ixgbe_sfp_type_da_cu:
1275 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1276 			break;
1277 		case ixgbe_sfp_type_sr:
1278 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1279 			break;
1280 		case ixgbe_sfp_type_lr:
1281 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1282 			break;
1283 		default:
1284 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1285 			break;
1286 		}
1287 	}
1288 
1289 	switch (hw->device_id) {
1290 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1291 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1292 		break;
1293 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1294 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1295 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1296 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1297 		break;
1298 	case IXGBE_DEV_ID_82598EB_XF_LR:
1299 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1300 		break;
1301 	default:
1302 		break;
1303 	}
1304 
1305 out:
1306 	return physical_layer;
1307 }
1308 
1309 /**
1310  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1311  *  port devices.
1312  *  @hw: pointer to the HW structure
1313  *
1314  *  Calls common function and corrects issue with some single port devices
1315  *  that enable LAN1 but not LAN0.
1316  **/
1317 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1318 {
1319 	struct ixgbe_bus_info *bus = &hw->bus;
1320 	u16 pci_gen = 0;
1321 	u16 pci_ctrl2 = 0;
1322 
1323 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1324 
1325 	ixgbe_set_lan_id_multi_port_pcie(hw);
1326 
1327 	/* check if LAN0 is disabled */
1328 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1329 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1330 
1331 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1332 
1333 		/* if LAN0 is completely disabled force function to 0 */
1334 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1335 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1336 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1337 
1338 			bus->func = 0;
1339 		}
1340 	}
1341 }
1342 
1343 /**
1344  *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1345  *  @hw: pointer to hardware structure
1346  *
1347  **/
1348 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1349 {
1350 	u32 regval;
1351 	u32 i;
1352 
1353 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1354 
1355 	/* Enable relaxed ordering */
1356 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1357 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1358 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1359 		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1360 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1361 	}
1362 
1363 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1364 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1365 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1366 		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1367 		           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1368 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1369 	}
1370 
1371 }
1372