xref: /freebsd/sys/dev/ixgbe/ixgbe_82598.c (revision ec0e626bafb335b30c499d06066997f54b10c092)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2014, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_82598.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 
41 #define IXGBE_82598_MAX_TX_QUEUES 32
42 #define IXGBE_82598_MAX_RX_QUEUES 64
43 #define IXGBE_82598_RAR_ENTRIES   16
44 #define IXGBE_82598_MC_TBL_SIZE  128
45 #define IXGBE_82598_VFT_TBL_SIZE 128
46 #define IXGBE_82598_RX_PB_SIZE   512
47 
48 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
49 					     ixgbe_link_speed *speed,
50 					     bool *autoneg);
51 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
52 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
53 				      bool autoneg_wait_to_complete);
54 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
55 				      ixgbe_link_speed *speed, bool *link_up,
56 				      bool link_up_wait_to_complete);
57 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
58 				      ixgbe_link_speed speed,
59 				      bool autoneg_wait_to_complete);
60 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
61 					 ixgbe_link_speed speed,
62 					 bool autoneg_wait_to_complete);
63 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
64 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
65 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
66 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
67 				  u32 headroom, int strategy);
68 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
69 					u8 *sff8472_data);
/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 250ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
 *
 *  In all cases the completion-timeout-resend bit is cleared on exit.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
111 
/**
 *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82598.
 *  Does not touch the hardware.
 *
 *  Returns the status of ixgbe_init_ops_generic().
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	/*
	 * NOTE(review): the result of ixgbe_init_phy_ops_generic() is
	 * immediately overwritten by the second call, so only the generic
	 * MAC init status is ever returned.  This matches the upstream
	 * shared code, but a failure in PHY-ops init is silently dropped.
	 */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = ixgbe_init_phy_ops_82598;

	/* MAC: override the generic pointers with 82598-specific handlers */
	mac->ops.start_hw = ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = ixgbe_reset_hw_82598;
	mac->ops.get_media_type = ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;

	/* RAR, Multicast, VLAN (82598 has no VLVF, hence the NULL) */
	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;
	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_82598;

	/* 82598-specific table sizes and queue limits */
	mac->mcft_size		= IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82598_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82598_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82598_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;

	/* Link (no flap_tx_laser on 82598) */
	mac->ops.check_link = ixgbe_check_mac_link_82598;
	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;

	/* Manageability interface: not supported on 82598 */
	mac->ops.set_fw_drv_ver = NULL;

	mac->ops.get_rtrup2tc = NULL;

	return ret_val;
}
181 
182 /**
183  *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
184  *  @hw: pointer to hardware structure
185  *
186  *  Initialize any function pointers that were not able to be
187  *  set during init_shared_code because the PHY/SFP type was
188  *  not known.  Perform the SFP init if necessary.
189  *
190  **/
191 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
192 {
193 	struct ixgbe_mac_info *mac = &hw->mac;
194 	struct ixgbe_phy_info *phy = &hw->phy;
195 	s32 ret_val = IXGBE_SUCCESS;
196 	u16 list_offset, data_offset;
197 
198 	DEBUGFUNC("ixgbe_init_phy_ops_82598");
199 
200 	/* Identify the PHY */
201 	phy->ops.identify(hw);
202 
203 	/* Overwrite the link function pointers if copper PHY */
204 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
205 		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
206 		mac->ops.get_link_capabilities =
207 				ixgbe_get_copper_link_capabilities_generic;
208 	}
209 
210 	switch (hw->phy.type) {
211 	case ixgbe_phy_tn:
212 		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
213 		phy->ops.check_link = ixgbe_check_phy_link_tnx;
214 		phy->ops.get_firmware_version =
215 					ixgbe_get_phy_firmware_version_tnx;
216 		break;
217 	case ixgbe_phy_nl:
218 		phy->ops.reset = ixgbe_reset_phy_nl;
219 
220 		/* Call SFP+ identify routine to get the SFP+ module type */
221 		ret_val = phy->ops.identify_sfp(hw);
222 		if (ret_val != IXGBE_SUCCESS)
223 			goto out;
224 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
225 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
226 			goto out;
227 		}
228 
229 		/* Check to see if SFP+ module is supported */
230 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
231 							      &list_offset,
232 							      &data_offset);
233 		if (ret_val != IXGBE_SUCCESS) {
234 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
235 			goto out;
236 		}
237 		break;
238 	default:
239 		break;
240 	}
241 
242 out:
243 	return ret_val;
244 }
245 
246 /**
247  *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
248  *  @hw: pointer to hardware structure
249  *
250  *  Starts the hardware using the generic start_hw function.
251  *  Disables relaxed ordering Then set pcie completion timeout
252  *
253  **/
254 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
255 {
256 	u32 regval;
257 	u32 i;
258 	s32 ret_val = IXGBE_SUCCESS;
259 
260 	DEBUGFUNC("ixgbe_start_hw_82598");
261 
262 	ret_val = ixgbe_start_hw_generic(hw);
263 
264 	/* Disable relaxed ordering */
265 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
266 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
267 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
268 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
269 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
270 	}
271 
272 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
273 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
274 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
275 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
276 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
277 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
278 	}
279 
280 	/* set the completion timeout for interface */
281 	if (ret_val == IXGBE_SUCCESS)
282 		ixgbe_set_pcie_completion_timeout(hw);
283 
284 	return ret_val;
285 }
286 
287 /**
288  *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
289  *  @hw: pointer to hardware structure
290  *  @speed: pointer to link speed
291  *  @autoneg: boolean auto-negotiation value
292  *
293  *  Determines the link capabilities by reading the AUTOC register.
294  **/
295 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
296 					     ixgbe_link_speed *speed,
297 					     bool *autoneg)
298 {
299 	s32 status = IXGBE_SUCCESS;
300 	u32 autoc = 0;
301 
302 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
303 
304 	/*
305 	 * Determine link capabilities based on the stored value of AUTOC,
306 	 * which represents EEPROM defaults.  If AUTOC value has not been
307 	 * stored, use the current register value.
308 	 */
309 	if (hw->mac.orig_link_settings_stored)
310 		autoc = hw->mac.orig_autoc;
311 	else
312 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
313 
314 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
315 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
316 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
317 		*autoneg = FALSE;
318 		break;
319 
320 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
321 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
322 		*autoneg = FALSE;
323 		break;
324 
325 	case IXGBE_AUTOC_LMS_1G_AN:
326 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
327 		*autoneg = TRUE;
328 		break;
329 
330 	case IXGBE_AUTOC_LMS_KX4_AN:
331 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
332 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
333 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
334 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
335 		if (autoc & IXGBE_AUTOC_KX_SUPP)
336 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
337 		*autoneg = TRUE;
338 		break;
339 
340 	default:
341 		status = IXGBE_ERR_LINK_SETUP;
342 		break;
343 	}
344 
345 	return status;
346 }
347 
348 /**
349  *  ixgbe_get_media_type_82598 - Determines media type
350  *  @hw: pointer to hardware structure
351  *
352  *  Returns the media type (fiber, copper, backplane)
353  **/
354 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
355 {
356 	enum ixgbe_media_type media_type;
357 
358 	DEBUGFUNC("ixgbe_get_media_type_82598");
359 
360 	/* Detect if there is a copper PHY attached. */
361 	switch (hw->phy.type) {
362 	case ixgbe_phy_cu_unknown:
363 	case ixgbe_phy_tn:
364 		media_type = ixgbe_media_type_copper;
365 		goto out;
366 	default:
367 		break;
368 	}
369 
370 	/* Media type for I82598 is based on device ID */
371 	switch (hw->device_id) {
372 	case IXGBE_DEV_ID_82598:
373 	case IXGBE_DEV_ID_82598_BX:
374 		/* Default device ID is mezzanine card KX/KX4 */
375 		media_type = ixgbe_media_type_backplane;
376 		break;
377 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
378 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
379 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
380 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
381 	case IXGBE_DEV_ID_82598EB_XF_LR:
382 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
383 		media_type = ixgbe_media_type_fiber;
384 		break;
385 	case IXGBE_DEV_ID_82598EB_CX4:
386 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
387 		media_type = ixgbe_media_type_cx4;
388 		break;
389 	case IXGBE_DEV_ID_82598AT:
390 	case IXGBE_DEV_ID_82598AT2:
391 		media_type = ixgbe_media_type_copper;
392 		break;
393 	default:
394 		media_type = ixgbe_media_type_unknown;
395 		break;
396 	}
397 out:
398 	return media_type;
399 }
400 
/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.  Validates the
 *  water marks, applies the 82598 1G-link erratum workaround (Rx flow
 *  control causes resets at 1G), negotiates the final mode, then programs
 *  FCTRL/RMCS, the per-TC water marks, the pause times, and the refresh
 *  threshold.
 *
 *  Returns IXGBE_ERR_INVALID_LINK_SETTINGS on bad water-mark config,
 *  IXGBE_ERR_CONFIG on an invalid fc mode, IXGBE_SUCCESS otherwise.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* low water must be non-zero and below high water */
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		/* Strip the Rx-pause half of the requested mode */
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *     we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* NOTE(review): unreachable after goto; kept for shared-code style */
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* water marks are in KB units; shift to byte fields */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
547 
548 /**
549  *  ixgbe_start_mac_link_82598 - Configures MAC link settings
550  *  @hw: pointer to hardware structure
551  *
552  *  Configures link settings based on values in the ixgbe_hw struct.
553  *  Restarts the link.  Performs autonegotiation if needed.
554  **/
555 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
556 				      bool autoneg_wait_to_complete)
557 {
558 	u32 autoc_reg;
559 	u32 links_reg;
560 	u32 i;
561 	s32 status = IXGBE_SUCCESS;
562 
563 	DEBUGFUNC("ixgbe_start_mac_link_82598");
564 
565 	/* Restart link */
566 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
567 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
568 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
569 
570 	/* Only poll for autoneg to complete if specified to do so */
571 	if (autoneg_wait_to_complete) {
572 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
573 		     IXGBE_AUTOC_LMS_KX4_AN ||
574 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
575 		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
576 			links_reg = 0; /* Just in case Autoneg time = 0 */
577 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
578 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
579 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
580 					break;
581 				msec_delay(100);
582 			}
583 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
584 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
585 				DEBUGOUT("Autonegotiation did not complete.\n");
586 			}
587 		}
588 	}
589 
590 	/* Add delay to filter out noises during initial link setup */
591 	msec_delay(50);
592 
593 	return status;
594 }
595 
596 /**
597  *  ixgbe_validate_link_ready - Function looks for phy link
598  *  @hw: pointer to hardware structure
599  *
600  *  Function indicates success when phy link is available. If phy is not ready
601  *  within 5 seconds of MAC indicating link, the function returns error.
602  **/
603 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
604 {
605 	u32 timeout;
606 	u16 an_reg;
607 
608 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
609 		return IXGBE_SUCCESS;
610 
611 	for (timeout = 0;
612 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
613 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
614 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
615 
616 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
617 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
618 			break;
619 
620 		msec_delay(100);
621 	}
622 
623 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
624 		DEBUGOUT("Link was indicated but link is down\n");
625 		return IXGBE_ERR_LINK_SETUP;
626 	}
627 
628 	return IXGBE_SUCCESS;
629 }
630 
/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE is link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed.
 *  For the NL (twinax) PHY, undocumented PHY registers are consulted first;
 *  on the 82598AT2 a PHY-side validation is performed before reporting up.
 *  Always returns IXGBE_SUCCESS.
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NOTE(review): 0xC79F is deliberately read twice
		 * back-to-back; presumably the first read clears a latched
		 * value — confirm against the PHY datasheet.
		 */
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* Poll in 100 ms steps until the PHY reports up */
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		/* PHY says down: skip the MAC-side LINKS check entirely */
		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		/* Poll LINKS in 100 ms steps for the MAC to report up */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* 82598 reports only two speeds: 10G or 1G */
	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* 82598AT2: require the PHY to confirm before reporting link up */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

out:
	return IXGBE_SUCCESS;
}
720 
721 /**
722  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
723  *  @hw: pointer to hardware structure
724  *  @speed: new link speed
725  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
726  *
727  *  Set the link speed in the AUTOC register and restarts link.
728  **/
729 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
730 				      ixgbe_link_speed speed,
731 				      bool autoneg_wait_to_complete)
732 {
733 	bool autoneg = FALSE;
734 	s32 status = IXGBE_SUCCESS;
735 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
736 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
737 	u32 autoc = curr_autoc;
738 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
739 
740 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
741 
742 	/* Check to see if speed passed in is supported. */
743 	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
744 	speed &= link_capabilities;
745 
746 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
747 		status = IXGBE_ERR_LINK_SETUP;
748 
749 	/* Set KX4/KX support according to speed requested */
750 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
751 		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
752 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
753 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
754 			autoc |= IXGBE_AUTOC_KX4_SUPP;
755 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
756 			autoc |= IXGBE_AUTOC_KX_SUPP;
757 		if (autoc != curr_autoc)
758 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
759 	}
760 
761 	if (status == IXGBE_SUCCESS) {
762 		/*
763 		 * Setup and restart the link based on the new values in
764 		 * ixgbe_hw This will write the AUTOC register based on the new
765 		 * stored values
766 		 */
767 		status = ixgbe_start_mac_link_82598(hw,
768 						    autoneg_wait_to_complete);
769 	}
770 
771 	return status;
772 }
773 
774 
775 /**
776  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
777  *  @hw: pointer to hardware structure
778  *  @speed: new link speed
779  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
780  *
781  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
782  **/
783 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
784 					 ixgbe_link_speed speed,
785 					 bool autoneg_wait_to_complete)
786 {
787 	s32 status;
788 
789 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
790 
791 	/* Setup the PHY according to input speed */
792 	status = hw->phy.ops.setup_link_speed(hw, speed,
793 					      autoneg_wait_to_complete);
794 	/* Set up MAC */
795 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
796 
797 	return status;
798 }
799 
/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.  A double MAC reset is issued when the DOUBLE_RESET_REQUIRED flag
 *  is set.  A PHY init failure (SFP not supported) takes precedence over the
 *  MAC reset status in the returned value.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		/* Power up the 10G lanes */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		/* Power up the 1G lanes */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		/* Power up the autoneg lanes */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  The flag is cleared first
	 * so the second pass falls through.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Clear selected header-ECC error-correction control bits */
	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	/* A PHY init error overrides the MAC reset status */
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}
937 
938 /**
939  *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
940  *  @hw: pointer to hardware struct
941  *  @rar: receive address register index to associate with a VMDq index
942  *  @vmdq: VMDq set index
943  **/
944 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
945 {
946 	u32 rar_high;
947 	u32 rar_entries = hw->mac.num_rar_entries;
948 
949 	DEBUGFUNC("ixgbe_set_vmdq_82598");
950 
951 	/* Make sure we are using a valid rar index range */
952 	if (rar >= rar_entries) {
953 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
954 		return IXGBE_ERR_INVALID_ARGUMENT;
955 	}
956 
957 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
958 	rar_high &= ~IXGBE_RAH_VIND_MASK;
959 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
960 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
961 	return IXGBE_SUCCESS;
962 }
963 
964 /**
965  *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
966  *  @hw: pointer to hardware struct
967  *  @rar: receive address register index to associate with a VMDq index
968  *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
969  **/
970 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
971 {
972 	u32 rar_high;
973 	u32 rar_entries = hw->mac.num_rar_entries;
974 
975 	UNREFERENCED_1PARAMETER(vmdq);
976 
977 	/* Make sure we are using a valid rar index range */
978 	if (rar >= rar_entries) {
979 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
980 		return IXGBE_ERR_INVALID_ARGUMENT;
981 	}
982 
983 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
984 	if (rar_high & IXGBE_RAH_VIND_MASK) {
985 		rar_high &= ~IXGBE_RAH_VIND_MASK;
986 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
987 	}
988 
989 	return IXGBE_SUCCESS;
990 }
991 
992 /**
993  *  ixgbe_set_vfta_82598 - Set VLAN filter table
994  *  @hw: pointer to hardware structure
995  *  @vlan: VLAN id to write to VLAN filter
996  *  @vind: VMDq output index that maps queue to VLAN id in VFTA
997  *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
998  *
999  *  Turn on/off specified VLAN in the VLAN filter table.
1000  **/
1001 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1002 			 bool vlan_on)
1003 {
1004 	u32 regindex;
1005 	u32 bitindex;
1006 	u32 bits;
1007 	u32 vftabyte;
1008 
1009 	DEBUGFUNC("ixgbe_set_vfta_82598");
1010 
1011 	if (vlan > 4095)
1012 		return IXGBE_ERR_PARAM;
1013 
1014 	/* Determine 32-bit word position in array */
1015 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
1016 
1017 	/* Determine the location of the (VMD) queue index */
1018 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1019 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
1020 
1021 	/* Set the nibble for VMD queue index */
1022 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1023 	bits &= (~(0x0F << bitindex));
1024 	bits |= (vind << bitindex);
1025 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1026 
1027 	/* Determine the location of the bit for this VLAN id */
1028 	bitindex = vlan & 0x1F;   /* lower five bits */
1029 
1030 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1031 	if (vlan_on)
1032 		/* Turn on this VLAN id */
1033 		bits |= (1 << bitindex);
1034 	else
1035 		/* Turn off this VLAN id */
1036 		bits &= ~(1 << bitindex);
1037 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1038 
1039 	return IXGBE_SUCCESS;
1040 }
1041 
1042 /**
1043  *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
1044  *  @hw: pointer to hardware structure
1045  *
1046  *  Clears the VLAN filer table, and the VMDq index associated with the filter
1047  **/
1048 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1049 {
1050 	u32 offset;
1051 	u32 vlanbyte;
1052 
1053 	DEBUGFUNC("ixgbe_clear_vfta_82598");
1054 
1055 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1056 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1057 
1058 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1059 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1060 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1061 					0);
1062 
1063 	return IXGBE_SUCCESS;
1064 }
1065 
1066 /**
1067  *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1068  *  @hw: pointer to hardware structure
1069  *  @reg: analog register to read
1070  *  @val: read value
1071  *
1072  *  Performs read operation to Atlas analog register specified.
1073  **/
1074 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1075 {
1076 	u32  atlas_ctl;
1077 
1078 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1079 
1080 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1081 			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1082 	IXGBE_WRITE_FLUSH(hw);
1083 	usec_delay(10);
1084 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1085 	*val = (u8)atlas_ctl;
1086 
1087 	return IXGBE_SUCCESS;
1088 }
1089 
1090 /**
1091  *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1092  *  @hw: pointer to hardware structure
1093  *  @reg: atlas register to write
1094  *  @val: value to write
1095  *
1096  *  Performs write operation to Atlas analog register specified.
1097  **/
1098 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1099 {
1100 	u32  atlas_ctl;
1101 
1102 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1103 
1104 	atlas_ctl = (reg << 8) | val;
1105 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1106 	IXGBE_WRITE_FLUSH(hw);
1107 	usec_delay(10);
1108 
1109 	return IXGBE_SUCCESS;
1110 }
1111 
/**
 *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @dev_addr: address to read from
 *  @byte_offset: byte offset to read from dev_addr
 *  @eeprom_data: value read
 *
 *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface,
 *  bridged through the NetLogic PHY's SDA/SCL MDIO registers.  The SW/FW
 *  semaphore for this port's PHY is held for the duration of the access.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_phy_82598");

	/* Pick the PHY semaphore that belongs to this LAN function */
	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		/* Device address in the high byte, byte offset in the low
		 * byte, plus the read-command flag */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					  sfp_addr);

		/* Poll status: up to 100 iterations at 10 ms each (~1 s)
		 * waiting for the I2C transaction to leave IN_PROGRESS */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
						&sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		/* Anything other than PASS (timeout, NAK, error status)
		 * is reported as a missing/unresponsive SFP module */
		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		/* The byte of interest is in the upper half of the word */
		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}
1185 
/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 *  Thin wrapper that targets the module's EEPROM device address
 *  (IXGBE_I2C_EEPROM_DEV_ADDR).
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}
1200 
/**
 *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset at address 0xA2
 *  @sff8472_data: value read
 *
 *  Performs 8 bit read operation to SFP module's SFF-8472 data over I2C.
 *  Thin wrapper that targets the module's diagnostics device address
 *  (IXGBE_I2C_EEPROM_DEV_ADDR2).
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}
1215 
/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
 *
 *  The result is refined in a deliberate order: copper PHYs are detected
 *  first and return early; otherwise the AUTOC link-mode bits set a
 *  baseline, a NetLogic SFP PHY (if present) overrides it, and finally a
 *  device-ID based override is applied for parts whose media is fixed by
 *  the board design.  Do not reorder these steps.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		/* Query the PHY's extended-ability MDIO register and OR in
		 * each copper layer it advertises */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* Non-copper: derive a baseline from the AUTOC link-mode-select
	 * and PMA/PMD fields */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	/* A NetLogic PHY fronts an SFP+ cage; let the identified module
	 * type override the AUTOC-derived baseline */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	/* Final override: these device IDs have fixed on-board media */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}
1317 
1318 /**
1319  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1320  *  port devices.
1321  *  @hw: pointer to the HW structure
1322  *
1323  *  Calls common function and corrects issue with some single port devices
1324  *  that enable LAN1 but not LAN0.
1325  **/
1326 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1327 {
1328 	struct ixgbe_bus_info *bus = &hw->bus;
1329 	u16 pci_gen = 0;
1330 	u16 pci_ctrl2 = 0;
1331 
1332 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1333 
1334 	ixgbe_set_lan_id_multi_port_pcie(hw);
1335 
1336 	/* check if LAN0 is disabled */
1337 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1338 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1339 
1340 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1341 
1342 		/* if LAN0 is completely disabled force function to 0 */
1343 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1344 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1345 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1346 
1347 			bus->func = 0;
1348 		}
1349 	}
1350 }
1351 
1352 /**
1353  *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1354  *  @hw: pointer to hardware structure
1355  *
1356  **/
1357 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1358 {
1359 	u32 regval;
1360 	u32 i;
1361 
1362 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1363 
1364 	/* Enable relaxed ordering */
1365 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1366 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1367 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1368 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1369 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1370 	}
1371 
1372 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1373 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1374 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1375 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1376 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1377 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1378 	}
1379 
1380 }
1381 
1382 /**
1383  * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1384  * @hw: pointer to hardware structure
1385  * @num_pb: number of packet buffers to allocate
1386  * @headroom: reserve n KB of headroom
1387  * @strategy: packet buffer allocation strategy
1388  **/
1389 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1390 				  u32 headroom, int strategy)
1391 {
1392 	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1393 	u8 i = 0;
1394 	UNREFERENCED_1PARAMETER(headroom);
1395 
1396 	if (!num_pb)
1397 		return;
1398 
1399 	/* Setup Rx packet buffer sizes */
1400 	switch (strategy) {
1401 	case PBA_STRATEGY_WEIGHTED:
1402 		/* Setup the first four at 80KB */
1403 		rxpktsize = IXGBE_RXPBSIZE_80KB;
1404 		for (; i < 4; i++)
1405 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1406 		/* Setup the last four at 48KB...don't re-init i */
1407 		rxpktsize = IXGBE_RXPBSIZE_48KB;
1408 		/* Fall Through */
1409 	case PBA_STRATEGY_EQUAL:
1410 	default:
1411 		/* Divide the remaining Rx packet buffer evenly among the TCs */
1412 		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1413 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1414 		break;
1415 	}
1416 
1417 	/* Setup Tx packet buffer sizes */
1418 	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1419 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1420 }
1421 
/**
 *  ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
 *  @hw: pointer to hardware structure
 *  @regval: register value to write to RXCTRL
 *
 *  Enables the Rx DMA unit by programming RXCTRL with the caller-supplied
 *  value.  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_82598");

	/* No 82598-specific sequencing needed; a single write suffices */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return IXGBE_SUCCESS;
}
1437