xref: /freebsd/sys/dev/ixgbe/ixgbe_82598.c (revision 7750ad47a9a7dbc83f87158464170c8640723293)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2012, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_82598.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 
41 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
42 					     ixgbe_link_speed *speed,
43 					     bool *autoneg);
44 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
45 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
46 				      bool autoneg_wait_to_complete);
47 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
48 				      ixgbe_link_speed *speed, bool *link_up,
49 				      bool link_up_wait_to_complete);
50 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
51 				      ixgbe_link_speed speed,
52 				      bool autoneg,
53 				      bool autoneg_wait_to_complete);
54 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
55 					 ixgbe_link_speed speed,
56 					 bool autoneg,
57 					 bool autoneg_wait_to_complete);
58 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
59 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
60 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
61 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
62 				  u32 headroom, int strategy);
63 
64 /**
65  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
66  *  @hw: pointer to the HW structure
67  *
68  *  The defaults for 82598 should be in the range of 50us to 50ms;
69  *  however, the hardware default for these parts is 500us to 1ms, which is
70  *  less than the 10ms recommended by the PCIe spec.  To address this we need
71  *  to increase the value to either 10ms to 250ms for capability version 1
72  *  config, or 16ms to 55ms for version 2.
73  **/
74 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
75 {
76 	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
77 	u16 pcie_devctl2;
78 
79 	/* only take action if timeout value is defaulted to 0 */
80 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
81 		goto out;
82 
83 	/*
84 	 * if the capabilities version is type 1 we can write a
85 	 * timeout of 10ms to 250ms through the GCR register
86 	 */
87 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
88 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
89 		goto out;
90 	}
91 
92 	/*
93 	 * for version 2 capabilities we need to write the config space
94 	 * directly in order to set the completion timeout value to the
95 	 * 16ms to 55ms range
96 	 */
97 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
98 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
99 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
100 out:
101 	/* disable completion timeout resend */
102 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
103 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
104 }
105 
106 /**
107  *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
108  *  @hw: pointer to hardware structure
109  *
110  *  Read PCIe configuration space, and get the MSI-X vector count from
111  *  the capabilities table.
112  **/
113 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
114 {
115 	u32 msix_count = 18;
116 
117 	DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
118 
119 	if (hw->mac.msix_vectors_from_pcie) {
120 		msix_count = IXGBE_READ_PCIE_WORD(hw,
121 						  IXGBE_PCIE_MSIX_82598_CAPS);
122 		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
123 
124 		/* MSI-X count is zero-based in HW, so increment to give
125 		 * proper value */
126 		msix_count++;
127 	}
128 	return msix_count;
129 }
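/*
 * Worked example (value illustrative only, not taken from a datasheet): if
 * the MSI-X table-size field read above were 0x11 (decimal 17), masking with
 * IXGBE_PCIE_MSIX_TBL_SZ_MASK and adding one to the zero-based value would
 * give the 18 vectors used as the default above.
 */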
130 
131 /**
132  *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
133  *  @hw: pointer to hardware structure
134  *
135  *  Initialize the function pointers and assign the MAC type for 82598.
136  *  Does not touch the hardware.
137  **/
138 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
139 {
140 	struct ixgbe_mac_info *mac = &hw->mac;
141 	struct ixgbe_phy_info *phy = &hw->phy;
142 	s32 ret_val;
143 
144 	DEBUGFUNC("ixgbe_init_ops_82598");
145 
146 	ret_val = ixgbe_init_phy_ops_generic(hw);
147 	ret_val = ixgbe_init_ops_generic(hw);
148 
149 	/* PHY */
150 	phy->ops.init = &ixgbe_init_phy_ops_82598;
151 
152 	/* MAC */
153 	mac->ops.start_hw = &ixgbe_start_hw_82598;
154 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
155 	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
156 	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
157 	mac->ops.get_supported_physical_layer =
158 				&ixgbe_get_supported_physical_layer_82598;
159 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
160 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
161 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
162 
163 	/* RAR, Multicast, VLAN */
164 	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
165 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
166 	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
167 	mac->ops.set_vlvf = NULL;
168 	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
169 
170 	/* Flow Control */
171 	mac->ops.fc_enable = &ixgbe_fc_enable_82598;
172 
173 	mac->mcft_size		= 128;
174 	mac->vft_size		= 128;
175 	mac->num_rar_entries	= 16;
176 	mac->rx_pb_size		= 512;
177 	mac->max_tx_queues	= 32;
178 	mac->max_rx_queues	= 64;
179 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_82598(hw);
180 
181 	/* SFP+ Module */
182 	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
183 
184 	/* Link */
185 	mac->ops.check_link = &ixgbe_check_mac_link_82598;
186 	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
187 	mac->ops.flap_tx_laser = NULL;
188 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
189 	mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
190 
191 	/* Manageability interface */
192 	mac->ops.set_fw_drv_ver = NULL;
193 
194 	return ret_val;
195 }
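/*
 * Illustrative sketch only (not part of the driver, compiled out below):
 * a minimal bring-up sequence through the ops table filled in above,
 * assuming an ixgbe_hw structure already identified as an 82598 part.
 * The helper name is hypothetical.
 */
#if 0
static s32 example_bring_up_82598(struct ixgbe_hw *hw)
{
	s32 ret = ixgbe_init_ops_82598(hw);	/* fill MAC/PHY function pointers */

	if (ret == IXGBE_SUCCESS)
		ret = hw->mac.ops.reset_hw(hw);	/* ixgbe_reset_hw_82598 */
	if (ret == IXGBE_SUCCESS)
		ret = hw->mac.ops.start_hw(hw);	/* ixgbe_start_hw_82598 */
	return ret;
}
#endif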
196 
197 /**
198  *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
199  *  @hw: pointer to hardware structure
200  *
201  *  Initialize any function pointers that were not able to be
202  *  set during init_shared_code because the PHY/SFP type was
203  *  not known.  Perform the SFP init if necessary.
204  *
205  **/
206 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
207 {
208 	struct ixgbe_mac_info *mac = &hw->mac;
209 	struct ixgbe_phy_info *phy = &hw->phy;
210 	s32 ret_val = IXGBE_SUCCESS;
211 	u16 list_offset, data_offset;
212 
213 	DEBUGFUNC("ixgbe_init_phy_ops_82598");
214 
215 	/* Identify the PHY */
216 	phy->ops.identify(hw);
217 
218 	/* Overwrite the link function pointers if copper PHY */
219 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
220 		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
221 		mac->ops.get_link_capabilities =
222 				&ixgbe_get_copper_link_capabilities_generic;
223 	}
224 
225 	switch (hw->phy.type) {
226 	case ixgbe_phy_tn:
227 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
228 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
229 		phy->ops.get_firmware_version =
230 					&ixgbe_get_phy_firmware_version_tnx;
231 		break;
232 	case ixgbe_phy_nl:
233 		phy->ops.reset = &ixgbe_reset_phy_nl;
234 
235 		/* Call SFP+ identify routine to get the SFP+ module type */
236 		ret_val = phy->ops.identify_sfp(hw);
237 		if (ret_val != IXGBE_SUCCESS)
238 			goto out;
239 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
240 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
241 			goto out;
242 		}
243 
244 		/* Check to see if SFP+ module is supported */
245 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
246 							      &list_offset,
247 							      &data_offset);
248 		if (ret_val != IXGBE_SUCCESS) {
249 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
250 			goto out;
251 		}
252 		break;
253 	default:
254 		break;
255 	}
256 
257 out:
258 	return ret_val;
259 }
260 
261 /**
262  *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
263  *  @hw: pointer to hardware structure
264  *
265  *  Starts the hardware using the generic start_hw function.
266  *  Disables relaxed ordering, then sets the PCIe completion timeout.
267  *
268  **/
269 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
270 {
271 	u32 regval;
272 	u32 i;
273 	s32 ret_val = IXGBE_SUCCESS;
274 
275 	DEBUGFUNC("ixgbe_start_hw_82598");
276 
277 	ret_val = ixgbe_start_hw_generic(hw);
278 
279 	/* Disable relaxed ordering */
280 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
281 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
282 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
283 		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
284 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
285 	}
286 
287 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
288 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
289 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
290 		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
291 			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
292 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
293 	}
294 
295 	/* set the completion timeout for the interface */
296 	if (ret_val == IXGBE_SUCCESS)
297 		ixgbe_set_pcie_completion_timeout(hw);
298 
299 	return ret_val;
300 }
301 
302 /**
303  *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
304  *  @hw: pointer to hardware structure
305  *  @speed: pointer to link speed
306  *  @autoneg: boolean auto-negotiation value
307  *
308  *  Determines the link capabilities by reading the AUTOC register.
309  **/
310 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
311 					     ixgbe_link_speed *speed,
312 					     bool *autoneg)
313 {
314 	s32 status = IXGBE_SUCCESS;
315 	u32 autoc = 0;
316 
317 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
318 
319 	/*
320 	 * Determine link capabilities based on the stored value of AUTOC,
321 	 * which represents EEPROM defaults.  If AUTOC value has not been
322 	 * stored, use the current register value.
323 	 */
324 	if (hw->mac.orig_link_settings_stored)
325 		autoc = hw->mac.orig_autoc;
326 	else
327 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
328 
329 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
330 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
331 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
332 		*autoneg = FALSE;
333 		break;
334 
335 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
336 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
337 		*autoneg = FALSE;
338 		break;
339 
340 	case IXGBE_AUTOC_LMS_1G_AN:
341 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
342 		*autoneg = TRUE;
343 		break;
344 
345 	case IXGBE_AUTOC_LMS_KX4_AN:
346 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
347 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
348 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
349 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
350 		if (autoc & IXGBE_AUTOC_KX_SUPP)
351 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
352 		*autoneg = TRUE;
353 		break;
354 
355 	default:
356 		status = IXGBE_ERR_LINK_SETUP;
357 		break;
358 	}
359 
360 	return status;
361 }
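/*
 * Example, reading from the switch above: with AUTOC programmed for
 * IXGBE_AUTOC_LMS_KX4_AN and both KX4_SUPP and KX_SUPP set, the function
 * reports *speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL
 * and *autoneg = TRUE.
 */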
362 
363 /**
364  *  ixgbe_get_media_type_82598 - Determines media type
365  *  @hw: pointer to hardware structure
366  *
367  *  Returns the media type (fiber, copper, backplane)
368  **/
369 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
370 {
371 	enum ixgbe_media_type media_type;
372 
373 	DEBUGFUNC("ixgbe_get_media_type_82598");
374 
375 	/* Detect if there is a copper PHY attached. */
376 	switch (hw->phy.type) {
377 	case ixgbe_phy_cu_unknown:
378 	case ixgbe_phy_tn:
379 		media_type = ixgbe_media_type_copper;
380 		goto out;
381 	default:
382 		break;
383 	}
384 
385 	/* Media type for I82598 is based on device ID */
386 	switch (hw->device_id) {
387 	case IXGBE_DEV_ID_82598:
388 	case IXGBE_DEV_ID_82598_BX:
389 		/* Default device ID is mezzanine card KX/KX4 */
390 		media_type = ixgbe_media_type_backplane;
391 		break;
392 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
393 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
394 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
395 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
396 	case IXGBE_DEV_ID_82598EB_XF_LR:
397 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
398 		media_type = ixgbe_media_type_fiber;
399 		break;
400 	case IXGBE_DEV_ID_82598EB_CX4:
401 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
402 		media_type = ixgbe_media_type_cx4;
403 		break;
404 	case IXGBE_DEV_ID_82598AT:
405 	case IXGBE_DEV_ID_82598AT2:
406 		media_type = ixgbe_media_type_copper;
407 		break;
408 	default:
409 		media_type = ixgbe_media_type_unknown;
410 		break;
411 	}
412 out:
413 	return media_type;
414 }
415 
416 /**
417  *  ixgbe_fc_enable_82598 - Enable flow control
418  *  @hw: pointer to hardware structure
419  *  @packetbuf_num: packet buffer number (0-7)
420  *
421  *  Enable flow control according to the current settings.
422  **/
423 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
424 {
425 	s32 ret_val = IXGBE_SUCCESS;
426 	u32 fctrl_reg;
427 	u32 rmcs_reg;
428 	u32 reg;
429 	u32 link_speed = 0;
430 	bool link_up;
431 
432 	DEBUGFUNC("ixgbe_fc_enable_82598");
433 
434 	/*
435 	 * On 82598, having Rx flow control enabled causes resets while
436 	 * operating at 1G, so if it is on, turn it off once link_speed is
437 	 * known.  For more details see the 82598 Specification Update.
438 	 */
439 	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
440 	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
441 		switch (hw->fc.requested_mode) {
442 		case ixgbe_fc_full:
443 			hw->fc.requested_mode = ixgbe_fc_tx_pause;
444 			break;
445 		case ixgbe_fc_rx_pause:
446 			hw->fc.requested_mode = ixgbe_fc_none;
447 			break;
448 		default:
449 			/* no change */
450 			break;
451 		}
452 	}
453 
454 	/* Negotiate the fc mode to use */
455 	ret_val = ixgbe_fc_autoneg(hw);
456 	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
457 		goto out;
458 
459 	/* Disable any previous flow control settings */
460 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
461 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
462 
463 	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
464 	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
465 
466 	/*
467 	 * The possible values of fc.current_mode are:
468 	 * 0: Flow control is completely disabled
469 	 * 1: Rx flow control is enabled (we can receive pause frames,
470 	 *    but not send pause frames).
471 	 * 2: Tx flow control is enabled (we can send pause frames but
472 	 *     we do not support receiving pause frames).
473 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
474 	 * other: Invalid.
475 	 */
476 	switch (hw->fc.current_mode) {
477 	case ixgbe_fc_none:
478 		/*
479 		 * Flow control is disabled by software override or autoneg.
480 		 * The code below will actually disable it in the HW.
481 		 */
482 		break;
483 	case ixgbe_fc_rx_pause:
484 		/*
485 		 * Rx Flow control is enabled and Tx Flow control is
486 		 * disabled by software override. Since there really
487 		 * isn't a way to advertise that we are capable of RX
488 		 * Pause ONLY, we will advertise that we support both
489 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
490 		 * disable the adapter's ability to send PAUSE frames.
491 		 */
492 		fctrl_reg |= IXGBE_FCTRL_RFCE;
493 		break;
494 	case ixgbe_fc_tx_pause:
495 		/*
496 		 * Tx Flow control is enabled, and Rx Flow control is
497 		 * disabled by software override.
498 		 */
499 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
500 		break;
501 	case ixgbe_fc_full:
502 		/* Flow control (both Rx and Tx) is enabled by SW override. */
503 		fctrl_reg |= IXGBE_FCTRL_RFCE;
504 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
505 		break;
506 	default:
507 		DEBUGOUT("Flow control param set incorrectly\n");
508 		ret_val = IXGBE_ERR_CONFIG;
509 		goto out;
510 		break;
511 	}
512 
513 	/* Set 802.3x based flow control settings. */
514 	fctrl_reg |= IXGBE_FCTRL_DPF;
515 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
516 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
517 
518 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
519 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
520 		reg = hw->fc.low_water << 6;
521 		if (hw->fc.send_xon)
522 			reg |= IXGBE_FCRTL_XONE;
523 
524 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
525 
526 		reg = hw->fc.high_water[packetbuf_num] << 6;
527 		reg |= IXGBE_FCRTH_FCEN;
528 
529 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
530 	}
531 
532 	/* Configure pause time (2 TCs per register) */
533 	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
534 	if ((packetbuf_num & 1) == 0)
535 		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
536 	else
537 		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
538 	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
539 
540 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
541 
542 out:
543 	return ret_val;
544 }
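/*
 * Note on the FCTTV packing above: each FCTTV register holds the pause time
 * for two packet buffers, the even-numbered buffer in bits 15:0 and the
 * odd-numbered buffer in bits 31:16.  For example, packetbuf_num = 5 lands
 * in FCTTV(2) with hw->fc.pause_time shifted into the upper halfword.
 */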
545 
546 /**
547  *  ixgbe_start_mac_link_82598 - Configures MAC link settings
548  *  @hw: pointer to hardware structure
549  *
550  *  Configures link settings based on values in the ixgbe_hw struct.
551  *  Restarts the link.  Performs autonegotiation if needed.
552  **/
553 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
554 				      bool autoneg_wait_to_complete)
555 {
556 	u32 autoc_reg;
557 	u32 links_reg;
558 	u32 i;
559 	s32 status = IXGBE_SUCCESS;
560 
561 	DEBUGFUNC("ixgbe_start_mac_link_82598");
562 
563 	/* Restart link */
564 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
565 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
566 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
567 
568 	/* Only poll for autoneg to complete if specified to do so */
569 	if (autoneg_wait_to_complete) {
570 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
571 		     IXGBE_AUTOC_LMS_KX4_AN ||
572 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
573 		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
574 			links_reg = 0; /* Just in case Autoneg time = 0 */
575 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
576 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
577 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
578 					break;
579 				msec_delay(100);
580 			}
581 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
582 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
583 				DEBUGOUT("Autonegotiation did not complete.\n");
584 			}
585 		}
586 	}
587 
588 	/* Add delay to filter out noise during initial link setup */
589 	msec_delay(50);
590 
591 	return status;
592 }
593 
594 /**
595  *  ixgbe_validate_link_ready - Function looks for phy link
596  *  @hw: pointer to hardware structure
597  *
598  *  Function indicates success when the PHY link is available.  If the PHY is
599  *  not ready within 5 seconds of the MAC indicating link, an error is returned.
600  **/
601 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
602 {
603 	u32 timeout;
604 	u16 an_reg;
605 
606 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
607 		return IXGBE_SUCCESS;
608 
609 	for (timeout = 0;
610 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
611 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
612 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
613 
614 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
615 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
616 			break;
617 
618 		msec_delay(100);
619 	}
620 
621 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
622 		DEBUGOUT("Link was indicated but link is down\n");
623 		return IXGBE_ERR_LINK_SETUP;
624 	}
625 
626 	return IXGBE_SUCCESS;
627 }
628 
629 /**
630  *  ixgbe_check_mac_link_82598 - Get link/speed status
631  *  @hw: pointer to hardware structure
632  *  @speed: pointer to link speed
633  *  @link_up: TRUE if link is up, FALSE otherwise
634  *  @link_up_wait_to_complete: bool used to wait for link up or not
635  *
636  *  Reads the links register to determine if link is up and the current speed
637  **/
638 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
639 				      ixgbe_link_speed *speed, bool *link_up,
640 				      bool link_up_wait_to_complete)
641 {
642 	u32 links_reg;
643 	u32 i;
644 	u16 link_reg, adapt_comp_reg;
645 
646 	DEBUGFUNC("ixgbe_check_mac_link_82598");
647 
648 	/*
649 	 * SERDES PHY requires us to read link status from undocumented
650 	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
651 	 * indicates link down.  0xC00C is read to check that the XAUI lanes
652 	 * are active.  Bit 0 clear indicates active; set indicates inactive.
653 	 */
654 	if (hw->phy.type == ixgbe_phy_nl) {
655 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
656 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
657 		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
658 				     &adapt_comp_reg);
659 		if (link_up_wait_to_complete) {
660 			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
661 				if ((link_reg & 1) &&
662 				    ((adapt_comp_reg & 1) == 0)) {
663 					*link_up = TRUE;
664 					break;
665 				} else {
666 					*link_up = FALSE;
667 				}
668 				msec_delay(100);
669 				hw->phy.ops.read_reg(hw, 0xC79F,
670 						     IXGBE_TWINAX_DEV,
671 						     &link_reg);
672 				hw->phy.ops.read_reg(hw, 0xC00C,
673 						     IXGBE_TWINAX_DEV,
674 						     &adapt_comp_reg);
675 			}
676 		} else {
677 			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
678 				*link_up = TRUE;
679 			else
680 				*link_up = FALSE;
681 		}
682 
683 		if (*link_up == FALSE)
684 			goto out;
685 	}
686 
687 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
688 	if (link_up_wait_to_complete) {
689 		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
690 			if (links_reg & IXGBE_LINKS_UP) {
691 				*link_up = TRUE;
692 				break;
693 			} else {
694 				*link_up = FALSE;
695 			}
696 			msec_delay(100);
697 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
698 		}
699 	} else {
700 		if (links_reg & IXGBE_LINKS_UP)
701 			*link_up = TRUE;
702 		else
703 			*link_up = FALSE;
704 	}
705 
706 	if (links_reg & IXGBE_LINKS_SPEED)
707 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
708 	else
709 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
710 
711 	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
712 	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
713 		*link_up = FALSE;
714 
715 out:
716 	return IXGBE_SUCCESS;
717 }
718 
719 /**
720  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
721  *  @hw: pointer to hardware structure
722  *  @speed: new link speed
723  *  @autoneg: TRUE if autonegotiation enabled
724  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
725  *
726  *  Set the link speed in the AUTOC register and restarts link.
727  **/
728 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
729 				      ixgbe_link_speed speed, bool autoneg,
730 				      bool autoneg_wait_to_complete)
731 {
732 	s32 status = IXGBE_SUCCESS;
733 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
734 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
735 	u32 autoc = curr_autoc;
736 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
737 
738 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
739 
740 	/* Check to see if speed passed in is supported. */
741 	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
742 	speed &= link_capabilities;
743 
744 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
745 		status = IXGBE_ERR_LINK_SETUP;
746 
747 	/* Set KX4/KX support according to speed requested */
748 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
749 		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
750 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
751 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
752 			autoc |= IXGBE_AUTOC_KX4_SUPP;
753 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
754 			autoc |= IXGBE_AUTOC_KX_SUPP;
755 		if (autoc != curr_autoc)
756 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
757 	}
758 
759 	if (status == IXGBE_SUCCESS) {
760 		/*
761 		 * Setup and restart the link based on the new values in
762 		 * ixgbe_hw.  This will write the AUTOC register based on the
763 		 * new stored values.
764 		 */
765 		status = ixgbe_start_mac_link_82598(hw,
766 						    autoneg_wait_to_complete);
767 	}
768 
769 	return status;
770 }
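/*
 * Example of the speed masking above: if the caller requests
 * IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL but the link mode
 * only advertises 1GB, the request is reduced to 1GB; if nothing overlaps,
 * speed becomes IXGBE_LINK_SPEED_UNKNOWN and IXGBE_ERR_LINK_SETUP is
 * returned.
 */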
771 
772 
773 /**
774  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
775  *  @hw: pointer to hardware structure
776  *  @speed: new link speed
777  *  @autoneg: TRUE if autonegotiation enabled
778  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
779  *
780  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
781  **/
782 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
783 					 ixgbe_link_speed speed,
784 					 bool autoneg,
785 					 bool autoneg_wait_to_complete)
786 {
787 	s32 status;
788 
789 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
790 
791 	/* Setup the PHY according to input speed */
792 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
793 					      autoneg_wait_to_complete);
794 	/* Set up MAC */
795 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
796 
797 	return status;
798 }
799 
800 /**
801  *  ixgbe_reset_hw_82598 - Performs hardware reset
802  *  @hw: pointer to hardware structure
803  *
804  *  Resets the hardware by resetting the transmit and receive units, masking
805  *  and clearing all interrupts, performing a PHY reset, and performing a
806  *  link (MAC) reset.
807  **/
808 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
809 {
810 	s32 status = IXGBE_SUCCESS;
811 	s32 phy_status = IXGBE_SUCCESS;
812 	u32 ctrl;
813 	u32 gheccr;
814 	u32 i;
815 	u32 autoc;
816 	u8  analog_val;
817 
818 	DEBUGFUNC("ixgbe_reset_hw_82598");
819 
820 	/* Call adapter stop to disable tx/rx and clear interrupts */
821 	status = hw->mac.ops.stop_adapter(hw);
822 	if (status != IXGBE_SUCCESS)
823 		goto reset_hw_out;
824 
825 	/*
826 	 * Power up the Atlas Tx lanes if they are currently powered down.
827 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
828 	 * they are not automatically restored on reset.
829 	 */
830 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
831 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
832 		/* Enable Tx Atlas so packets can be transmitted again */
833 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
834 					     &analog_val);
835 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
836 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
837 					      analog_val);
838 
839 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
840 					     &analog_val);
841 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
842 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
843 					      analog_val);
844 
845 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
846 					     &analog_val);
847 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
848 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
849 					      analog_val);
850 
851 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
852 					     &analog_val);
853 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
854 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
855 					      analog_val);
856 	}
857 
858 	/* Reset PHY */
859 	if (hw->phy.reset_disable == FALSE) {
860 		/* PHY ops must be identified and initialized prior to reset */
861 
862 		/* Init PHY and function pointers, perform SFP setup */
863 		phy_status = hw->phy.ops.init(hw);
864 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
865 			goto reset_hw_out;
866 		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
867 			goto mac_reset_top;
868 
869 		hw->phy.ops.reset(hw);
870 	}
871 
872 mac_reset_top:
873 	/*
874 	 * Issue global reset to the MAC.  This needs to be a SW reset.
875 	 * If a link reset is used, it might reset the MAC while manageability is using it.
876 	 */
877 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
878 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
879 	IXGBE_WRITE_FLUSH(hw);
880 
881 	/* Poll for reset bit to self-clear indicating reset is complete */
882 	for (i = 0; i < 10; i++) {
883 		usec_delay(1);
884 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
885 		if (!(ctrl & IXGBE_CTRL_RST))
886 			break;
887 	}
888 	if (ctrl & IXGBE_CTRL_RST) {
889 		status = IXGBE_ERR_RESET_FAILED;
890 		DEBUGOUT("Reset polling failed to complete.\n");
891 	}
892 
893 	msec_delay(50);
894 
895 	/*
896 	 * Double resets are required for recovery from certain error
897 	 * conditions.  Between resets, it is necessary to stall to allow time
898 	 * for any pending HW events to complete.
899 	 */
900 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
901 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
902 		goto mac_reset_top;
903 	}
904 
905 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
906 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
907 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
908 
909 	/*
910 	 * Store the original AUTOC value if it has not been
911 	 * stored off yet.  Otherwise restore the stored original
912 	 * AUTOC value since the reset operation sets it back to defaults.
913 	 */
914 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
915 	if (hw->mac.orig_link_settings_stored == FALSE) {
916 		hw->mac.orig_autoc = autoc;
917 		hw->mac.orig_link_settings_stored = TRUE;
918 	} else if (autoc != hw->mac.orig_autoc) {
919 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
920 	}
921 
922 	/* Store the permanent mac address */
923 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
924 
925 	/*
926 	 * Store MAC address from RAR0, clear receive address registers, and
927 	 * clear the multicast table
928 	 */
929 	hw->mac.ops.init_rx_addrs(hw);
930 
931 reset_hw_out:
932 	if (phy_status != IXGBE_SUCCESS)
933 		status = phy_status;
934 
935 	return status;
936 }
937 
938 /**
939  *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
940  *  @hw: pointer to hardware struct
941  *  @rar: receive address register index to associate with a VMDq index
942  *  @vmdq: VMDq set index
943  **/
944 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
945 {
946 	u32 rar_high;
947 	u32 rar_entries = hw->mac.num_rar_entries;
948 
949 	DEBUGFUNC("ixgbe_set_vmdq_82598");
950 
951 	/* Make sure we are using a valid rar index range */
952 	if (rar >= rar_entries) {
953 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
954 		return IXGBE_ERR_INVALID_ARGUMENT;
955 	}
956 
957 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
958 	rar_high &= ~IXGBE_RAH_VIND_MASK;
959 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
960 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
961 	return IXGBE_SUCCESS;
962 }
963 
964 /**
965  *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
966  *  @hw: pointer to hardware struct
967  *  @rar: receive address register index to associate with a VMDq index
968  *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
969  **/
970 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
971 {
972 	u32 rar_high;
973 	u32 rar_entries = hw->mac.num_rar_entries;
974 
975 	UNREFERENCED_1PARAMETER(vmdq);
976 
977 	/* Make sure we are using a valid rar index range */
978 	if (rar >= rar_entries) {
979 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
980 		return IXGBE_ERR_INVALID_ARGUMENT;
981 	}
982 
983 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
984 	if (rar_high & IXGBE_RAH_VIND_MASK) {
985 		rar_high &= ~IXGBE_RAH_VIND_MASK;
986 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
987 	}
988 
989 	return IXGBE_SUCCESS;
990 }
991 
992 /**
993  *  ixgbe_set_vfta_82598 - Set VLAN filter table
994  *  @hw: pointer to hardware structure
995  *  @vlan: VLAN id to write to VLAN filter
996  *  @vind: VMDq output index that maps queue to VLAN id in VFTA
997  *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
998  *
999  *  Turn on/off specified VLAN in the VLAN filter table.
1000  **/
1001 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1002 			 bool vlan_on)
1003 {
1004 	u32 regindex;
1005 	u32 bitindex;
1006 	u32 bits;
1007 	u32 vftabyte;
1008 
1009 	DEBUGFUNC("ixgbe_set_vfta_82598");
1010 
1011 	if (vlan > 4095)
1012 		return IXGBE_ERR_PARAM;
1013 
1014 	/* Determine 32-bit word position in array */
1015 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
1016 
1017 	/* Determine the location of the (VMD) queue index */
1018 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1019 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
1020 
1021 	/* Set the nibble for VMD queue index */
1022 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1023 	bits &= (~(0x0F << bitindex));
1024 	bits |= (vind << bitindex);
1025 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1026 
1027 	/* Determine the location of the bit for this VLAN id */
1028 	bitindex = vlan & 0x1F;   /* lower five bits */
1029 
1030 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1031 	if (vlan_on)
1032 		/* Turn on this VLAN id */
1033 		bits |= (1 << bitindex);
1034 	else
1035 		/* Turn off this VLAN id */
1036 		bits &= ~(1 << bitindex);
1037 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1038 
1039 	return IXGBE_SUCCESS;
1040 }
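/*
 * Worked example of the indexing above, for VLAN id 100 (0x064):
 *   regindex = 100 >> 5          = 3   -> VFTA[3]
 *   bitindex = 100 & 0x1F        = 4   -> bit 4 of VFTA[3]
 *   vftabyte = (100 >> 3) & 0x03 = 0
 *   nibble   = (100 & 0x7) << 2  = 16  -> bits 19:16 of VFTAVIND(0, 3)
 * so the VMDq index for VLAN 100 occupies bits 19:16 of VFTAVIND(0, 3) and
 * its enable bit is bit 4 of VFTA[3].
 */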
1041 
1042 /**
1043  *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
1044  *  @hw: pointer to hardware structure
1045  *
1046  *  Clears the VLAN filter table and the VMDq index associated with the filter.
1047  **/
1048 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1049 {
1050 	u32 offset;
1051 	u32 vlanbyte;
1052 
1053 	DEBUGFUNC("ixgbe_clear_vfta_82598");
1054 
1055 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1056 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1057 
1058 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1059 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1060 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1061 					0);
1062 
1063 	return IXGBE_SUCCESS;
1064 }
1065 
1066 /**
1067  *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1068  *  @hw: pointer to hardware structure
1069  *  @reg: analog register to read
1070  *  @val: read value
1071  *
1072  *  Performs read operation to Atlas analog register specified.
1073  **/
1074 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1075 {
1076 	u32  atlas_ctl;
1077 
1078 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1079 
1080 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1081 			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1082 	IXGBE_WRITE_FLUSH(hw);
1083 	usec_delay(10);
1084 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1085 	*val = (u8)atlas_ctl;
1086 
1087 	return IXGBE_SUCCESS;
1088 }
1089 
1090 /**
1091  *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1092  *  @hw: pointer to hardware structure
1093  *  @reg: atlas register to write
1094  *  @val: value to write
1095  *
1096  *  Performs write operation to Atlas analog register specified.
1097  **/
1098 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1099 {
1100 	u32  atlas_ctl;
1101 
1102 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1103 
1104 	atlas_ctl = (reg << 8) | val;
1105 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1106 	IXGBE_WRITE_FLUSH(hw);
1107 	usec_delay(10);
1108 
1109 	return IXGBE_SUCCESS;
1110 }
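/*
 * As used by the two routines above, ATLASCTL accesses place the analog
 * register number in bits 15:8 and the 8-bit data value in bits 7:0; the
 * read path writes IXGBE_ATLASCTL_WRITE_CMD together with the register
 * number, waits 10us, and then takes the returned value from the low byte
 * of ATLASCTL.
 */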
1111 
1112 /**
1113  *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1114  *  @hw: pointer to hardware structure
1115  *  @byte_offset: EEPROM byte offset to read
1116  *  @eeprom_data: value read
1117  *
1118  *  Performs an 8 bit read from the SFP module's EEPROM over the I2C interface.
1119  **/
1120 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1121 				u8 *eeprom_data)
1122 {
1123 	s32 status = IXGBE_SUCCESS;
1124 	u16 sfp_addr = 0;
1125 	u16 sfp_data = 0;
1126 	u16 sfp_stat = 0;
1127 	u32 i;
1128 
1129 	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1130 
1131 	if (hw->phy.type == ixgbe_phy_nl) {
1132 		/*
1133 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1134 		 * 0xC30D. These registers are used to talk to the SFP+
1135 		 * module's EEPROM through the SDA/SCL (I2C) interface.
1136 		 */
1137 		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1138 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1139 		hw->phy.ops.write_reg(hw,
1140 				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1141 				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1142 				      sfp_addr);
1143 
1144 		/* Poll status */
1145 		for (i = 0; i < 100; i++) {
1146 			hw->phy.ops.read_reg(hw,
1147 					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1148 					     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1149 					     &sfp_stat);
1150 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1151 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1152 				break;
1153 			msec_delay(10);
1154 		}
1155 
1156 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1157 			DEBUGOUT("EEPROM read did not pass.\n");
1158 			status = IXGBE_ERR_SFP_NOT_PRESENT;
1159 			goto out;
1160 		}
1161 
1162 		/* Read data */
1163 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1164 				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1165 
1166 		*eeprom_data = (u8)(sfp_data >> 8);
1167 	} else {
1168 		status = IXGBE_ERR_PHY;
1169 		goto out;
1170 	}
1171 
1172 out:
1173 	return status;
1174 }
1175 
1176 /**
1177  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1178  *  @hw: pointer to hardware structure
1179  *
1180  *  Determines physical layer capabilities of the current configuration.
1181  **/
1182 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1183 {
1184 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1185 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1186 	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1187 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1188 	u16 ext_ability = 0;
1189 
1190 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1191 
1192 	hw->phy.ops.identify(hw);
1193 
1194 	/* Copper PHY must be checked before AUTOC LMS to determine correct
1195 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1196 	switch (hw->phy.type) {
1197 	case ixgbe_phy_tn:
1198 	case ixgbe_phy_cu_unknown:
1199 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1200 				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1201 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1202 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1203 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1204 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1205 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1206 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1207 		goto out;
1208 	default:
1209 		break;
1210 	}
1211 
1212 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1213 	case IXGBE_AUTOC_LMS_1G_AN:
1214 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1215 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1216 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1217 		else
1218 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1219 		break;
1220 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1221 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1222 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1223 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1224 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1225 		else /* XAUI */
1226 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1227 		break;
1228 	case IXGBE_AUTOC_LMS_KX4_AN:
1229 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1230 		if (autoc & IXGBE_AUTOC_KX_SUPP)
1231 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1232 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
1233 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1234 		break;
1235 	default:
1236 		break;
1237 	}
1238 
1239 	if (hw->phy.type == ixgbe_phy_nl) {
1240 		hw->phy.ops.identify_sfp(hw);
1241 
1242 		switch (hw->phy.sfp_type) {
1243 		case ixgbe_sfp_type_da_cu:
1244 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1245 			break;
1246 		case ixgbe_sfp_type_sr:
1247 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1248 			break;
1249 		case ixgbe_sfp_type_lr:
1250 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1251 			break;
1252 		default:
1253 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1254 			break;
1255 		}
1256 	}
1257 
1258 	switch (hw->device_id) {
1259 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1260 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1261 		break;
1262 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1263 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1264 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1265 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1266 		break;
1267 	case IXGBE_DEV_ID_82598EB_XF_LR:
1268 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1269 		break;
1270 	default:
1271 		break;
1272 	}
1273 
1274 out:
1275 	return physical_layer;
1276 }
1277 
1278 /**
1279  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1280  *  port devices.
1281  *  @hw: pointer to the HW structure
1282  *
1283  *  Calls common function and corrects issue with some single port devices
1284  *  that enable LAN1 but not LAN0.
1285  **/
1286 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1287 {
1288 	struct ixgbe_bus_info *bus = &hw->bus;
1289 	u16 pci_gen = 0;
1290 	u16 pci_ctrl2 = 0;
1291 
1292 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1293 
1294 	ixgbe_set_lan_id_multi_port_pcie(hw);
1295 
1296 	/* check if LAN0 is disabled */
1297 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1298 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1299 
1300 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1301 
1302 		/* if LAN0 is completely disabled force function to 0 */
1303 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1304 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1305 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1306 
1307 			bus->func = 0;
1308 		}
1309 	}
1310 }
1311 
1312 /**
1313  *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1314  *  @hw: pointer to hardware structure
1315  *
1316  **/
1317 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1318 {
1319 	u32 regval;
1320 	u32 i;
1321 
1322 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1323 
1324 	/* Enable relaxed ordering */
1325 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1326 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1327 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1328 		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1329 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1330 	}
1331 
1332 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1333 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1334 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1335 		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1336 			   IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1337 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1338 	}
1339 
1340 }
1341 
1342 /**
1343  * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1344  * @hw: pointer to hardware structure
1345  * @num_pb: number of packet buffers to allocate
1346  * @headroom: reserve n KB of headroom
1347  * @strategy: packet buffer allocation strategy
1348  **/
1349 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1350 				  u32 headroom, int strategy)
1351 {
1352 	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1353 	u8 i = 0;
1354 	UNREFERENCED_1PARAMETER(headroom);
1355 
1356 	if (!num_pb)
1357 		return;
1358 
1359 	/* Setup Rx packet buffer sizes */
1360 	switch (strategy) {
1361 	case PBA_STRATEGY_WEIGHTED:
1362 		/* Setup the first four at 80KB */
1363 		rxpktsize = IXGBE_RXPBSIZE_80KB;
1364 		for (; i < 4; i++)
1365 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1366 		/* Setup the last four at 48KB...don't re-init i */
1367 		rxpktsize = IXGBE_RXPBSIZE_48KB;
1368 		/* Fall Through */
1369 	case PBA_STRATEGY_EQUAL:
1370 	default:
1371 		/* Divide the remaining Rx packet buffer evenly among the TCs */
1372 		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1373 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1374 		break;
1375 	}
1376 
1377 	/* Setup Tx packet buffer sizes */
1378 	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1379 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1380 
1381 	return;
1382 }
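/*
 * Packet buffer accounting for the routine above, assuming the sizes
 * implied by the IXGBE_RXPBSIZE_* names and IXGBE_MAX_PACKET_BUFFERS == 8:
 * PBA_STRATEGY_WEIGHTED splits the Rx packet buffer as 4 x 80KB + 4 x 48KB
 * = 512KB, while PBA_STRATEGY_EQUAL uses 8 x 64KB = 512KB; both match the
 * rx_pb_size of 512 set in ixgbe_init_ops_82598().
 */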
1383