1 /******************************************************************************
2 
3   Copyright (c) 2001-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_82598.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 
41 #define IXGBE_82598_MAX_TX_QUEUES 32
42 #define IXGBE_82598_MAX_RX_QUEUES 64
43 #define IXGBE_82598_RAR_ENTRIES   16
44 #define IXGBE_82598_MC_TBL_SIZE  128
45 #define IXGBE_82598_VFT_TBL_SIZE 128
46 #define IXGBE_82598_RX_PB_SIZE   512
47 
48 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
49 					     ixgbe_link_speed *speed,
50 					     bool *autoneg);
51 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
52 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
53 				      bool autoneg_wait_to_complete);
54 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
55 				      ixgbe_link_speed *speed, bool *link_up,
56 				      bool link_up_wait_to_complete);
57 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
58 				      ixgbe_link_speed speed,
59 				      bool autoneg_wait_to_complete);
60 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
61 					 ixgbe_link_speed speed,
62 					 bool autoneg_wait_to_complete);
63 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
64 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
65 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
66 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
67 				  u32 headroom, int strategy);
68 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
69 					u8 *sff8472_data);
70 /**
71  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
72  *  @hw: pointer to the HW structure
73  *
74  *  The defaults for 82598 should be in the range of 50us to 50ms,
75  *  however the hardware default for these parts is 500us to 1ms which is less
76  *  than the 10ms recommended by the pci-e spec.  To address this we need to
77  *  increase the value to either 10ms to 250ms for capability version 1 config,
78  *  or 16ms to 55ms for version 2.
79  **/
80 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
81 {
82 	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
83 	u16 pcie_devctl2;
84 
85 	/* only take action if timeout value is defaulted to 0 */
86 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
87 		goto out;
88 
89 	/*
90 	 * if capabilities version is type 1 we can write the
91 	 * timeout of 10ms to 250ms through the GCR register
92 	 */
93 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
94 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
95 		goto out;
96 	}
97 
98 	/*
99 	 * for version 2 capabilities we need to write the config space
100 	 * directly in order to set the completion timeout value for
101 	 * 16ms to 55ms
102 	 */
103 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
104 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
105 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
106 out:
107 	/* disable completion timeout resend */
108 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
109 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
110 }
111 
112 /**
113  *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
114  *  @hw: pointer to hardware structure
115  *
116  *  Initialize the function pointers and assign the MAC type for 82598.
117  *  Does not touch the hardware.
118  **/
119 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
120 {
121 	struct ixgbe_mac_info *mac = &hw->mac;
122 	struct ixgbe_phy_info *phy = &hw->phy;
123 	s32 ret_val;
124 
125 	DEBUGFUNC("ixgbe_init_ops_82598");
126 
127 	ret_val = ixgbe_init_phy_ops_generic(hw);
128 	ret_val = ixgbe_init_ops_generic(hw);
129 
130 	/* PHY */
131 	phy->ops.init = ixgbe_init_phy_ops_82598;
132 
133 	/* MAC */
134 	mac->ops.start_hw = ixgbe_start_hw_82598;
135 	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
136 	mac->ops.reset_hw = ixgbe_reset_hw_82598;
137 	mac->ops.get_media_type = ixgbe_get_media_type_82598;
138 	mac->ops.get_supported_physical_layer =
139 				ixgbe_get_supported_physical_layer_82598;
140 	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
141 	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
142 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
143 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
144 
145 	/* RAR, Multicast, VLAN */
146 	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
147 	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
148 	mac->ops.set_vfta = ixgbe_set_vfta_82598;
149 	mac->ops.set_vlvf = NULL;
150 	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
151 
152 	/* Flow Control */
153 	mac->ops.fc_enable = ixgbe_fc_enable_82598;
154 
155 	mac->mcft_size		= IXGBE_82598_MC_TBL_SIZE;
156 	mac->vft_size		= IXGBE_82598_VFT_TBL_SIZE;
157 	mac->num_rar_entries	= IXGBE_82598_RAR_ENTRIES;
158 	mac->rx_pb_size		= IXGBE_82598_RX_PB_SIZE;
159 	mac->max_rx_queues	= IXGBE_82598_MAX_RX_QUEUES;
160 	mac->max_tx_queues	= IXGBE_82598_MAX_TX_QUEUES;
161 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
162 
163 	/* SFP+ Module */
164 	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
165 	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
166 
167 	/* Link */
168 	mac->ops.check_link = ixgbe_check_mac_link_82598;
169 	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
170 	mac->ops.flap_tx_laser = NULL;
171 	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
172 	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
173 
174 	/* Manageability interface */
175 	mac->ops.set_fw_drv_ver = NULL;
176 
177 	mac->ops.get_rtrup2tc = NULL;
178 
179 	return ret_val;
180 }
181 
182 /**
183  *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
184  *  @hw: pointer to hardware structure
185  *
186  *  Initialize any function pointers that were not able to be
187  *  set during init_shared_code because the PHY/SFP type was
188  *  not known.  Perform the SFP init if necessary.
189  *
190  **/
191 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
192 {
193 	struct ixgbe_mac_info *mac = &hw->mac;
194 	struct ixgbe_phy_info *phy = &hw->phy;
195 	s32 ret_val = IXGBE_SUCCESS;
196 	u16 list_offset, data_offset;
197 
198 	DEBUGFUNC("ixgbe_init_phy_ops_82598");
199 
200 	/* Identify the PHY */
201 	phy->ops.identify(hw);
202 
203 	/* Overwrite the link function pointers if copper PHY */
204 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
205 		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
206 		mac->ops.get_link_capabilities =
207 				ixgbe_get_copper_link_capabilities_generic;
208 	}
209 
210 	switch (hw->phy.type) {
211 	case ixgbe_phy_tn:
212 		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
213 		phy->ops.check_link = ixgbe_check_phy_link_tnx;
214 		phy->ops.get_firmware_version =
215 					ixgbe_get_phy_firmware_version_tnx;
216 		break;
217 	case ixgbe_phy_nl:
218 		phy->ops.reset = ixgbe_reset_phy_nl;
219 
220 		/* Call SFP+ identify routine to get the SFP+ module type */
221 		ret_val = phy->ops.identify_sfp(hw);
222 		if (ret_val != IXGBE_SUCCESS)
223 			goto out;
224 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
225 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
226 			goto out;
227 		}
228 
229 		/* Check to see if SFP+ module is supported */
230 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
231 							      &list_offset,
232 							      &data_offset);
233 		if (ret_val != IXGBE_SUCCESS) {
234 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
235 			goto out;
236 		}
237 		break;
238 	default:
239 		break;
240 	}
241 
242 out:
243 	return ret_val;
244 }
245 
246 /**
247  *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
248  *  @hw: pointer to hardware structure
249  *
250  *  Starts the hardware using the generic start_hw function.
251  *  Disables relaxed ordering, then sets the PCIe completion timeout.
252  *
253  **/
254 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
255 {
256 	u32 regval;
257 	u32 i;
258 	s32 ret_val = IXGBE_SUCCESS;
259 
260 	DEBUGFUNC("ixgbe_start_hw_82598");
261 
262 	ret_val = ixgbe_start_hw_generic(hw);
263 	if (ret_val)
264 		return ret_val;
265 
266 	/* Disable relaxed ordering */
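	/* Write relaxed ordering is cleared in every Tx/Rx DCA control register
	 * here; ixgbe_enable_relaxed_ordering_82598() below can re-enable it. */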
267 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
268 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
269 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
270 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
271 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
272 	}
273 
274 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
275 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
276 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
277 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
278 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
279 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
280 	}
281 
282 	/* set the completion timeout for interface */
283 	ixgbe_set_pcie_completion_timeout(hw);
284 
285 	return ret_val;
286 }
287 
288 /**
289  *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
290  *  @hw: pointer to hardware structure
291  *  @speed: pointer to link speed
292  *  @autoneg: boolean auto-negotiation value
293  *
294  *  Determines the link capabilities by reading the AUTOC register.
295  **/
296 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
297 					     ixgbe_link_speed *speed,
298 					     bool *autoneg)
299 {
300 	s32 status = IXGBE_SUCCESS;
301 	u32 autoc = 0;
302 
303 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
304 
305 	/*
306 	 * Determine link capabilities based on the stored value of AUTOC,
307 	 * which represents EEPROM defaults.  If AUTOC value has not been
308 	 * stored, use the current register value.
309 	 */
310 	if (hw->mac.orig_link_settings_stored)
311 		autoc = hw->mac.orig_autoc;
312 	else
313 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
314 
315 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
316 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
317 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
318 		*autoneg = FALSE;
319 		break;
320 
321 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
322 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
323 		*autoneg = FALSE;
324 		break;
325 
326 	case IXGBE_AUTOC_LMS_1G_AN:
327 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
328 		*autoneg = TRUE;
329 		break;
330 
331 	case IXGBE_AUTOC_LMS_KX4_AN:
332 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
333 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
334 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
335 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
336 		if (autoc & IXGBE_AUTOC_KX_SUPP)
337 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
338 		*autoneg = TRUE;
339 		break;
340 
341 	default:
342 		status = IXGBE_ERR_LINK_SETUP;
343 		break;
344 	}
345 
346 	return status;
347 }
348 
349 /**
350  *  ixgbe_get_media_type_82598 - Determines media type
351  *  @hw: pointer to hardware structure
352  *
353  *  Returns the media type (fiber, copper, backplane)
354  **/
355 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
356 {
357 	enum ixgbe_media_type media_type;
358 
359 	DEBUGFUNC("ixgbe_get_media_type_82598");
360 
361 	/* Detect if there is a copper PHY attached. */
362 	switch (hw->phy.type) {
363 	case ixgbe_phy_cu_unknown:
364 	case ixgbe_phy_tn:
365 		media_type = ixgbe_media_type_copper;
366 		goto out;
367 	default:
368 		break;
369 	}
370 
371 	/* Media type for I82598 is based on device ID */
372 	switch (hw->device_id) {
373 	case IXGBE_DEV_ID_82598:
374 	case IXGBE_DEV_ID_82598_BX:
375 		/* Default device ID is mezzanine card KX/KX4 */
376 		media_type = ixgbe_media_type_backplane;
377 		break;
378 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
379 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
380 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
381 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
382 	case IXGBE_DEV_ID_82598EB_XF_LR:
383 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
384 		media_type = ixgbe_media_type_fiber;
385 		break;
386 	case IXGBE_DEV_ID_82598EB_CX4:
387 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
388 		media_type = ixgbe_media_type_cx4;
389 		break;
390 	case IXGBE_DEV_ID_82598AT:
391 	case IXGBE_DEV_ID_82598AT2:
392 		media_type = ixgbe_media_type_copper;
393 		break;
394 	default:
395 		media_type = ixgbe_media_type_unknown;
396 		break;
397 	}
398 out:
399 	return media_type;
400 }
401 
402 /**
403  *  ixgbe_fc_enable_82598 - Enable flow control
404  *  @hw: pointer to hardware structure
405  *
406  *  Enable flow control according to the current settings.
407  **/
408 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
409 {
410 	s32 ret_val = IXGBE_SUCCESS;
411 	u32 fctrl_reg;
412 	u32 rmcs_reg;
413 	u32 reg;
414 	u32 fcrtl, fcrth;
415 	u32 link_speed = 0;
416 	int i;
417 	bool link_up;
418 
419 	DEBUGFUNC("ixgbe_fc_enable_82598");
420 
421 	/* Validate the water mark configuration */
422 	if (!hw->fc.pause_time) {
423 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
424 		goto out;
425 	}
426 
427 	/* Low water mark of zero causes XOFF floods */
428 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
429 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
430 		    hw->fc.high_water[i]) {
431 			if (!hw->fc.low_water[i] ||
432 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
433 				DEBUGOUT("Invalid water mark configuration\n");
434 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
435 				goto out;
436 			}
437 		}
438 	}
439 
440 	/*
441 	 * On 82598, having Rx flow control on causes resets while doing 1G,
442 	 * so if it is on, turn it off once link_speed is known.  For more
443 	 * details, see the 82598 Specification Update.
444 	 */
445 	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
446 	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
447 		switch (hw->fc.requested_mode) {
448 		case ixgbe_fc_full:
449 			hw->fc.requested_mode = ixgbe_fc_tx_pause;
450 			break;
451 		case ixgbe_fc_rx_pause:
452 			hw->fc.requested_mode = ixgbe_fc_none;
453 			break;
454 		default:
455 			/* no change */
456 			break;
457 		}
458 	}
459 
460 	/* Negotiate the fc mode to use */
461 	ixgbe_fc_autoneg(hw);
462 
463 	/* Disable any previous flow control settings */
464 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
465 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
466 
467 	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
468 	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
469 
470 	/*
471 	 * The possible values of fc.current_mode are:
472 	 * 0: Flow control is completely disabled
473 	 * 1: Rx flow control is enabled (we can receive pause frames,
474 	 *    but not send pause frames).
475 	 * 2: Tx flow control is enabled (we can send pause frames but
476 	 *     we do not support receiving pause frames).
477 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
478 	 * other: Invalid.
479 	 */
480 	switch (hw->fc.current_mode) {
481 	case ixgbe_fc_none:
482 		/*
483 		 * Flow control is disabled by software override or autoneg.
484 		 * The code below will actually disable it in the HW.
485 		 */
486 		break;
487 	case ixgbe_fc_rx_pause:
488 		/*
489 		 * Rx Flow control is enabled and Tx Flow control is
490 		 * disabled by software override. Since there really
491 		 * isn't a way to advertise that we are capable of RX
492 		 * Pause ONLY, we will advertise that we support both
493 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
494 		 * disable the adapter's ability to send PAUSE frames.
495 		 */
496 		fctrl_reg |= IXGBE_FCTRL_RFCE;
497 		break;
498 	case ixgbe_fc_tx_pause:
499 		/*
500 		 * Tx Flow control is enabled, and Rx Flow control is
501 		 * disabled by software override.
502 		 */
503 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
504 		break;
505 	case ixgbe_fc_full:
506 		/* Flow control (both Rx and Tx) is enabled by SW override. */
507 		fctrl_reg |= IXGBE_FCTRL_RFCE;
508 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
509 		break;
510 	default:
511 		DEBUGOUT("Flow control param set incorrectly\n");
512 		ret_val = IXGBE_ERR_CONFIG;
513 		goto out;
514 		break;
515 	}
516 
517 	/* Set 802.3x based flow control settings. */
518 	fctrl_reg |= IXGBE_FCTRL_DPF;
519 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
520 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
521 
522 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
523 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
524 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
525 		    hw->fc.high_water[i]) {
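			/* Program the per-TC thresholds: low water mark with
			 * XON enabled, high water mark with flow control
			 * enabled (values shifted into the threshold fields). */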
526 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
527 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
528 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
529 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
530 		} else {
531 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
532 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
533 		}
534 
535 	}
536 
537 	/* Configure pause time (2 TCs per register) */
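	/* Multiplying by 0x00010001 replicates the 16-bit pause time into both
	 * halves of each 32-bit FCTTV register (two traffic classes per write). */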
538 	reg = hw->fc.pause_time * 0x00010001;
539 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
540 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
541 
542 	/* Configure flow control refresh threshold value */
543 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
544 
545 out:
546 	return ret_val;
547 }
548 
549 /**
550  *  ixgbe_start_mac_link_82598 - Configures MAC link settings
551  *  @hw: pointer to hardware structure
552  *
553  *  Configures link settings based on values in the ixgbe_hw struct.
554  *  Restarts the link.  Performs autonegotiation if needed.
555  **/
556 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
557 				      bool autoneg_wait_to_complete)
558 {
559 	u32 autoc_reg;
560 	u32 links_reg;
561 	u32 i;
562 	s32 status = IXGBE_SUCCESS;
563 
564 	DEBUGFUNC("ixgbe_start_mac_link_82598");
565 
566 	/* Restart link */
567 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
568 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
569 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
570 
571 	/* Only poll for autoneg to complete if specified to do so */
572 	if (autoneg_wait_to_complete) {
573 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
574 		     IXGBE_AUTOC_LMS_KX4_AN ||
575 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
576 		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
577 			links_reg = 0; /* Just in case Autoneg time = 0 */
578 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
579 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
580 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
581 					break;
582 				msec_delay(100);
583 			}
584 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
585 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
586 				DEBUGOUT("Autonegotiation did not complete.\n");
587 			}
588 		}
589 	}
590 
591 	/* Add delay to filter out noise during initial link setup */
592 	msec_delay(50);
593 
594 	return status;
595 }
596 
597 /**
598  *  ixgbe_validate_link_ready - Function looks for phy link
599  *  @hw: pointer to hardware structure
600  *
601  *  Function indicates success when phy link is available. If phy is not ready
602  *  within 5 seconds of the MAC indicating link, the function returns an error.
603  **/
604 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
605 {
606 	u32 timeout;
607 	u16 an_reg;
608 
609 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
610 		return IXGBE_SUCCESS;
611 
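	/* Poll the PHY autoneg status register in 100 ms steps, up to
	 * IXGBE_VALIDATE_LINK_READY_TIMEOUT iterations, until both
	 * autoneg-complete and link-up are reported. */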
612 	for (timeout = 0;
613 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
614 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
615 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
616 
617 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
618 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
619 			break;
620 
621 		msec_delay(100);
622 	}
623 
624 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
625 		DEBUGOUT("Link was indicated but link is down\n");
626 		return IXGBE_ERR_LINK_SETUP;
627 	}
628 
629 	return IXGBE_SUCCESS;
630 }
631 
632 /**
633  *  ixgbe_check_mac_link_82598 - Get link/speed status
634  *  @hw: pointer to hardware structure
635  *  @speed: pointer to link speed
636  *  @link_up: TRUE if link is up, FALSE otherwise
637  *  @link_up_wait_to_complete: bool used to wait for link up or not
638  *
639  *  Reads the links register to determine if link is up and the current speed
640  **/
641 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
642 				      ixgbe_link_speed *speed, bool *link_up,
643 				      bool link_up_wait_to_complete)
644 {
645 	u32 links_reg;
646 	u32 i;
647 	u16 link_reg, adapt_comp_reg;
648 
649 	DEBUGFUNC("ixgbe_check_mac_link_82598");
650 
651 	/*
652 	 * SERDES PHY requires us to read link status from undocumented
653 	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
654 	 * indicates link down.  0xC00C is read to check that the XAUI lanes
655 	 * are active.  Bit 0 clear indicates active; set indicates inactive.
656 	 */
657 	if (hw->phy.type == ixgbe_phy_nl) {
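		/* The link status register is read twice because the first
		 * read may return a stale, latched value (assumption; this
		 * behavior is not documented in this file). */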
658 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
659 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
660 		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
661 				     &adapt_comp_reg);
662 		if (link_up_wait_to_complete) {
663 			for (i = 0; i < hw->mac.max_link_up_time; i++) {
664 				if ((link_reg & 1) &&
665 				    ((adapt_comp_reg & 1) == 0)) {
666 					*link_up = TRUE;
667 					break;
668 				} else {
669 					*link_up = FALSE;
670 				}
671 				msec_delay(100);
672 				hw->phy.ops.read_reg(hw, 0xC79F,
673 						     IXGBE_TWINAX_DEV,
674 						     &link_reg);
675 				hw->phy.ops.read_reg(hw, 0xC00C,
676 						     IXGBE_TWINAX_DEV,
677 						     &adapt_comp_reg);
678 			}
679 		} else {
680 			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
681 				*link_up = TRUE;
682 			else
683 				*link_up = FALSE;
684 		}
685 
686 		if (*link_up == FALSE)
687 			goto out;
688 	}
689 
690 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
691 	if (link_up_wait_to_complete) {
692 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
693 			if (links_reg & IXGBE_LINKS_UP) {
694 				*link_up = TRUE;
695 				break;
696 			} else {
697 				*link_up = FALSE;
698 			}
699 			msec_delay(100);
700 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
701 		}
702 	} else {
703 		if (links_reg & IXGBE_LINKS_UP)
704 			*link_up = TRUE;
705 		else
706 			*link_up = FALSE;
707 	}
708 
709 	if (links_reg & IXGBE_LINKS_SPEED)
710 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
711 	else
712 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
713 
714 	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
715 	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
716 		*link_up = FALSE;
717 
718 out:
719 	return IXGBE_SUCCESS;
720 }
721 
722 /**
723  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
724  *  @hw: pointer to hardware structure
725  *  @speed: new link speed
726  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
727  *
728  *  Set the link speed in the AUTOC register and restarts link.
729  **/
730 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
731 				      ixgbe_link_speed speed,
732 				      bool autoneg_wait_to_complete)
733 {
734 	bool autoneg = FALSE;
735 	s32 status = IXGBE_SUCCESS;
736 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
737 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
738 	u32 autoc = curr_autoc;
739 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
740 
741 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
742 
743 	/* Check to see if speed passed in is supported. */
744 	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
745 	speed &= link_capabilities;
746 
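	/* If no advertised speed survives the mask, the request cannot be
	 * satisfied by the current link mode. */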
747 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
748 		status = IXGBE_ERR_LINK_SETUP;
749 
750 	/* Set KX4/KX support according to speed requested */
751 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
752 		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
753 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
754 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
755 			autoc |= IXGBE_AUTOC_KX4_SUPP;
756 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
757 			autoc |= IXGBE_AUTOC_KX_SUPP;
758 		if (autoc != curr_autoc)
759 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
760 	}
761 
762 	if (status == IXGBE_SUCCESS) {
763 		/*
764 		 * Setup and restart the link based on the new values in
765 		 * ixgbe_hw This will write the AUTOC register based on the new
766 		 * stored values
767 		 */
768 		status = ixgbe_start_mac_link_82598(hw,
769 						    autoneg_wait_to_complete);
770 	}
771 
772 	return status;
773 }
774 
775 
776 /**
777  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
778  *  @hw: pointer to hardware structure
779  *  @speed: new link speed
780  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
781  *
782  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
783  **/
784 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
785 					 ixgbe_link_speed speed,
786 					 bool autoneg_wait_to_complete)
787 {
788 	s32 status;
789 
790 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
791 
792 	/* Setup the PHY according to input speed */
793 	status = hw->phy.ops.setup_link_speed(hw, speed,
794 					      autoneg_wait_to_complete);
795 	/* Set up MAC */
796 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
797 
798 	return status;
799 }
800 
801 /**
802  *  ixgbe_reset_hw_82598 - Performs hardware reset
803  *  @hw: pointer to hardware structure
804  *
805  *  Resets the hardware by resetting the transmit and receive units, masks and
806  *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
807  *  reset.
808  **/
809 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
810 {
811 	s32 status = IXGBE_SUCCESS;
812 	s32 phy_status = IXGBE_SUCCESS;
813 	u32 ctrl;
814 	u32 gheccr;
815 	u32 i;
816 	u32 autoc;
817 	u8  analog_val;
818 
819 	DEBUGFUNC("ixgbe_reset_hw_82598");
820 
821 	/* Call adapter stop to disable tx/rx and clear interrupts */
822 	status = hw->mac.ops.stop_adapter(hw);
823 	if (status != IXGBE_SUCCESS)
824 		goto reset_hw_out;
825 
826 	/*
827 	 * Power up the Atlas Tx lanes if they are currently powered down.
828 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
829 	 * they are not automatically restored on reset.
830 	 */
831 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
832 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
833 		/* Enable Tx Atlas so packets can be transmitted again */
834 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
835 					     &analog_val);
836 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
837 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
838 					      analog_val);
839 
840 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
841 					     &analog_val);
842 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
843 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
844 					      analog_val);
845 
846 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
847 					     &analog_val);
848 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
849 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
850 					      analog_val);
851 
852 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
853 					     &analog_val);
854 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
855 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
856 					      analog_val);
857 	}
858 
859 	/* Reset PHY */
860 	if (hw->phy.reset_disable == FALSE) {
861 		/* PHY ops must be identified and initialized prior to reset */
862 
863 		/* Init PHY and function pointers, perform SFP setup */
864 		phy_status = hw->phy.ops.init(hw);
865 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
866 			goto reset_hw_out;
867 		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
868 			goto mac_reset_top;
869 
870 		hw->phy.ops.reset(hw);
871 	}
872 
873 mac_reset_top:
874 	/*
875 	 * Issue global reset to the MAC.  This needs to be a SW reset.
876 	 * If link reset is used, it might reset the MAC when mng is using it
877 	 */
878 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
879 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
880 	IXGBE_WRITE_FLUSH(hw);
881 
882 	/* Poll for reset bit to self-clear indicating reset is complete */
883 	for (i = 0; i < 10; i++) {
884 		usec_delay(1);
885 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
886 		if (!(ctrl & IXGBE_CTRL_RST))
887 			break;
888 	}
889 	if (ctrl & IXGBE_CTRL_RST) {
890 		status = IXGBE_ERR_RESET_FAILED;
891 		DEBUGOUT("Reset polling failed to complete.\n");
892 	}
893 
894 	msec_delay(50);
895 
896 	/*
897 	 * Double resets are required for recovery from certain error
898 	 * conditions.  Between resets, it is necessary to stall to allow time
899 	 * for any pending HW events to complete.
900 	 */
901 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
902 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
903 		goto mac_reset_top;
904 	}
905 
906 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
907 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
908 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
909 
910 	/*
911 	 * Store the original AUTOC value if it has not been
912 	 * stored off yet.  Otherwise restore the stored original
913 	 * AUTOC value since the reset operation sets it back to defaults.
914 	 */
915 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
916 	if (hw->mac.orig_link_settings_stored == FALSE) {
917 		hw->mac.orig_autoc = autoc;
918 		hw->mac.orig_link_settings_stored = TRUE;
919 	} else if (autoc != hw->mac.orig_autoc) {
920 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
921 	}
922 
923 	/* Store the permanent mac address */
924 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
925 
926 	/*
927 	 * Store MAC address from RAR0, clear receive address registers, and
928 	 * clear the multicast table
929 	 */
930 	hw->mac.ops.init_rx_addrs(hw);
931 
932 reset_hw_out:
933 	if (phy_status != IXGBE_SUCCESS)
934 		status = phy_status;
935 
936 	return status;
937 }
938 
939 /**
940  *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
941  *  @hw: pointer to hardware struct
942  *  @rar: receive address register index to associate with a VMDq index
943  *  @vmdq: VMDq set index
944  **/
945 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
946 {
947 	u32 rar_high;
948 	u32 rar_entries = hw->mac.num_rar_entries;
949 
950 	DEBUGFUNC("ixgbe_set_vmdq_82598");
951 
952 	/* Make sure we are using a valid rar index range */
953 	if (rar >= rar_entries) {
954 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
955 		return IXGBE_ERR_INVALID_ARGUMENT;
956 	}
957 
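	/* On 82598 the VMDq pool index is carried in the VIND field of the
	 * RAR high register for this receive address. */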
958 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
959 	rar_high &= ~IXGBE_RAH_VIND_MASK;
960 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
961 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
962 	return IXGBE_SUCCESS;
963 }
964 
965 /**
966  *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
967  *  @hw: pointer to hardware struct
968  *  @rar: receive address register index to associate with a VMDq index
969  *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
970  **/
971 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
972 {
973 	u32 rar_high;
974 	u32 rar_entries = hw->mac.num_rar_entries;
975 
976 	UNREFERENCED_1PARAMETER(vmdq);
977 
978 	/* Make sure we are using a valid rar index range */
979 	if (rar >= rar_entries) {
980 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
981 		return IXGBE_ERR_INVALID_ARGUMENT;
982 	}
983 
984 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
985 	if (rar_high & IXGBE_RAH_VIND_MASK) {
986 		rar_high &= ~IXGBE_RAH_VIND_MASK;
987 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
988 	}
989 
990 	return IXGBE_SUCCESS;
991 }
992 
993 /**
994  *  ixgbe_set_vfta_82598 - Set VLAN filter table
995  *  @hw: pointer to hardware structure
996  *  @vlan: VLAN id to write to VLAN filter
997  *  @vind: VMDq output index that maps queue to VLAN id in VFTA
998  *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
999  *
1000  *  Turn on/off specified VLAN in the VLAN filter table.
1001  **/
1002 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1003 			 bool vlan_on)
1004 {
1005 	u32 regindex;
1006 	u32 bitindex;
1007 	u32 bits;
1008 	u32 vftabyte;
1009 
1010 	DEBUGFUNC("ixgbe_set_vfta_82598");
1011 
1012 	if (vlan > 4095)
1013 		return IXGBE_ERR_PARAM;
1014 
1015 	/* Determine 32-bit word position in array */
1016 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
1017 
1018 	/* Determine the location of the (VMD) queue index */
1019 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1020 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
1021 
1022 	/* Set the nibble for VMD queue index */
1023 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1024 	bits &= (~(0x0F << bitindex));
1025 	bits |= (vind << bitindex);
1026 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1027 
1028 	/* Determine the location of the bit for this VLAN id */
1029 	bitindex = vlan & 0x1F;   /* lower five bits */
1030 
1031 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1032 	if (vlan_on)
1033 		/* Turn on this VLAN id */
1034 		bits |= (1 << bitindex);
1035 	else
1036 		/* Turn off this VLAN id */
1037 		bits &= ~(1 << bitindex);
1038 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1039 
1040 	return IXGBE_SUCCESS;
1041 }
1042 
1043 /**
1044  *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
1045  *  @hw: pointer to hardware structure
1046  *
1047  *  Clears the VLAN filter table, and the VMDq index associated with the filter
1048  **/
1049 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1050 {
1051 	u32 offset;
1052 	u32 vlanbyte;
1053 
1054 	DEBUGFUNC("ixgbe_clear_vfta_82598");
1055 
1056 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1057 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1058 
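	/* 82598 also keeps a per-VLAN VMDq pool nibble in four VFTAVIND byte
	 * arrays (see ixgbe_set_vfta_82598); clear those as well. */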
1059 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1060 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1061 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1062 					0);
1063 
1064 	return IXGBE_SUCCESS;
1065 }
1066 
1067 /**
1068  *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1069  *  @hw: pointer to hardware structure
1070  *  @reg: analog register to read
1071  *  @val: read value
1072  *
1073  *  Performs read operation to Atlas analog register specified.
1074  **/
1075 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1076 {
1077 	u32  atlas_ctl;
1078 
1079 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1080 
1081 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1082 			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1083 	IXGBE_WRITE_FLUSH(hw);
1084 	usec_delay(10);
1085 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1086 	*val = (u8)atlas_ctl;
1087 
1088 	return IXGBE_SUCCESS;
1089 }
1090 
1091 /**
1092  *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1093  *  @hw: pointer to hardware structure
1094  *  @reg: atlas register to write
1095  *  @val: value to write
1096  *
1097  *  Performs write operation to Atlas analog register specified.
1098  **/
1099 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1100 {
1101 	u32  atlas_ctl;
1102 
1103 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1104 
1105 	atlas_ctl = (reg << 8) | val;
1106 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1107 	IXGBE_WRITE_FLUSH(hw);
1108 	usec_delay(10);
1109 
1110 	return IXGBE_SUCCESS;
1111 }
1112 
1113 /**
1114  *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1115  *  @hw: pointer to hardware structure
1116  *  @dev_addr: address to read from
1117  *  @byte_offset: byte offset to read from dev_addr
1118  *  @eeprom_data: value read
1119  *
1120  *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1121  **/
1122 static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1123 				    u8 byte_offset, u8 *eeprom_data)
1124 {
1125 	s32 status = IXGBE_SUCCESS;
1126 	u16 sfp_addr = 0;
1127 	u16 sfp_data = 0;
1128 	u16 sfp_stat = 0;
1129 	u16 gssr;
1130 	u32 i;
1131 
1132 	DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1133 
1134 	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1135 		gssr = IXGBE_GSSR_PHY1_SM;
1136 	else
1137 		gssr = IXGBE_GSSR_PHY0_SM;
1138 
1139 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
1140 		return IXGBE_ERR_SWFW_SYNC;
1141 
1142 	if (hw->phy.type == ixgbe_phy_nl) {
1143 		/*
1144 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1145 		 * 0xC30D. These registers are used to talk to the SFP+
1146 		 * module's EEPROM through the SDA/SCL (I2C) interface.
1147 		 */
1148 		sfp_addr = (dev_addr << 8) + byte_offset;
1149 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1150 		hw->phy.ops.write_reg_mdi(hw,
1151 					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1152 					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1153 					  sfp_addr);
1154 
1155 		/* Poll status */
1156 		for (i = 0; i < 100; i++) {
1157 			hw->phy.ops.read_reg_mdi(hw,
1158 						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1159 						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1160 						&sfp_stat);
1161 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1162 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1163 				break;
1164 			msec_delay(10);
1165 		}
1166 
1167 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1168 			DEBUGOUT("EEPROM read did not pass.\n");
1169 			status = IXGBE_ERR_SFP_NOT_PRESENT;
1170 			goto out;
1171 		}
1172 
1173 		/* Read data */
1174 		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1175 					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1176 
1177 		*eeprom_data = (u8)(sfp_data >> 8);
1178 	} else {
1179 		status = IXGBE_ERR_PHY;
1180 	}
1181 
1182 out:
1183 	hw->mac.ops.release_swfw_sync(hw, gssr);
1184 	return status;
1185 }
1186 
1187 /**
1188  *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1189  *  @hw: pointer to hardware structure
1190  *  @byte_offset: EEPROM byte offset to read
1191  *  @eeprom_data: value read
1192  *
1193  *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1194  **/
1195 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1196 				u8 *eeprom_data)
1197 {
1198 	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1199 					byte_offset, eeprom_data);
1200 }
1201 
1202 /**
1203  *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1204  *  @hw: pointer to hardware structure
1205  *  @byte_offset: byte offset at address 0xA2
1206  *  @eeprom_data: value read
1207  *
1208  *  Performs 8 bit read operation of SFP module's SFF-8472 data over I2C
1209  **/
1210 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1211 					u8 *sff8472_data)
1212 {
1213 	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1214 					byte_offset, sff8472_data);
1215 }
1216 
1217 /**
1218  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1219  *  @hw: pointer to hardware structure
1220  *
1221  *  Determines physical layer capabilities of the current configuration.
1222  **/
1223 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1224 {
1225 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1226 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1227 	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1228 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1229 	u16 ext_ability = 0;
1230 
1231 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1232 
1233 	hw->phy.ops.identify(hw);
1234 
1235 	/* Copper PHY must be checked before AUTOC LMS to determine correct
1236 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1237 	switch (hw->phy.type) {
1238 	case ixgbe_phy_tn:
1239 	case ixgbe_phy_cu_unknown:
1240 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1241 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1242 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1243 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1244 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1245 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1246 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1247 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1248 		goto out;
1249 	default:
1250 		break;
1251 	}
1252 
1253 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1254 	case IXGBE_AUTOC_LMS_1G_AN:
1255 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1256 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1257 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1258 		else
1259 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1260 		break;
1261 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1262 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1263 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1264 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1265 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1266 		else /* XAUI */
1267 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1268 		break;
1269 	case IXGBE_AUTOC_LMS_KX4_AN:
1270 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1271 		if (autoc & IXGBE_AUTOC_KX_SUPP)
1272 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1273 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
1274 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1275 		break;
1276 	default:
1277 		break;
1278 	}
1279 
1280 	if (hw->phy.type == ixgbe_phy_nl) {
1281 		hw->phy.ops.identify_sfp(hw);
1282 
1283 		switch (hw->phy.sfp_type) {
1284 		case ixgbe_sfp_type_da_cu:
1285 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1286 			break;
1287 		case ixgbe_sfp_type_sr:
1288 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1289 			break;
1290 		case ixgbe_sfp_type_lr:
1291 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1292 			break;
1293 		default:
1294 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1295 			break;
1296 		}
1297 	}
1298 
1299 	switch (hw->device_id) {
1300 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1301 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1302 		break;
1303 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1304 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1305 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1306 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1307 		break;
1308 	case IXGBE_DEV_ID_82598EB_XF_LR:
1309 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1310 		break;
1311 	default:
1312 		break;
1313 	}
1314 
1315 out:
1316 	return physical_layer;
1317 }
1318 
1319 /**
1320  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1321  *  port devices.
1322  *  @hw: pointer to the HW structure
1323  *
1324  *  Calls common function and corrects issue with some single port devices
1325  *  that enable LAN1 but not LAN0.
1326  **/
1327 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1328 {
1329 	struct ixgbe_bus_info *bus = &hw->bus;
1330 	u16 pci_gen = 0;
1331 	u16 pci_ctrl2 = 0;
1332 
1333 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1334 
1335 	ixgbe_set_lan_id_multi_port_pcie(hw);
1336 
1337 	/* check if LAN0 is disabled */
1338 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1339 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1340 
1341 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1342 
1343 		/* if LAN0 is completely disabled force function to 0 */
1344 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1345 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1346 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1347 
1348 			bus->func = 0;
1349 		}
1350 	}
1351 }
1352 
1353 /**
1354  *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1355  *  @hw: pointer to hardware structure
1356  *
1357  **/
1358 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1359 {
1360 	u32 regval;
1361 	u32 i;
1362 
1363 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1364 
1365 	/* Enable relaxed ordering */
1366 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1367 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1368 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1369 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1370 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1371 	}
1372 
1373 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1374 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1375 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1376 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1377 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1378 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1379 	}
1380 
1381 }
1382 
1383 /**
1384  * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1385  * @hw: pointer to hardware structure
1386  * @num_pb: number of packet buffers to allocate
1387  * @headroom: reserve n KB of headroom
1388  * @strategy: packet buffer allocation strategy
1389  **/
1390 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1391 				  u32 headroom, int strategy)
1392 {
1393 	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1394 	u8 i = 0;
1395 	UNREFERENCED_1PARAMETER(headroom);
1396 
1397 	if (!num_pb)
1398 		return;
1399 
1400 	/* Setup Rx packet buffer sizes */
1401 	switch (strategy) {
1402 	case PBA_STRATEGY_WEIGHTED:
1403 		/* Setup the first four at 80KB */
1404 		rxpktsize = IXGBE_RXPBSIZE_80KB;
1405 		for (; i < 4; i++)
1406 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1407 		/* Setup the last four at 48KB...don't re-init i */
1408 		rxpktsize = IXGBE_RXPBSIZE_48KB;
1409 		/* Fall Through */
1410 	case PBA_STRATEGY_EQUAL:
1411 	default:
1412 		/* Divide the remaining Rx packet buffer evenly among the TCs */
1413 		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1414 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1415 		break;
1416 	}
1417 
1418 	/* Setup Tx packet buffer sizes */
1419 	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1420 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1421 }
1422 
1423 /**
1424  *  ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
1425  *  @hw: pointer to hardware structure
1426  *  @regval: register value to write to RXCTRL
1427  *
1428  *  Enables the Rx DMA unit
1429  **/
1430 s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
1431 {
1432 	DEBUGFUNC("ixgbe_enable_rx_dma_82598");
1433 
1434 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
1435 
1436 	return IXGBE_SUCCESS;
1437 }
1438