xref: /freebsd/sys/dev/ixgbe/ixgbe_82598.c (revision 71625ec9ad2a9bc8c09784fbd23b759830e0ee5f)
1 /******************************************************************************
2   SPDX-License-Identifier: BSD-3-Clause
3 
4   Copyright (c) 2001-2020, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_82598.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 
41 #define IXGBE_82598_MAX_TX_QUEUES 32
42 #define IXGBE_82598_MAX_RX_QUEUES 64
43 #define IXGBE_82598_RAR_ENTRIES   16
44 #define IXGBE_82598_MC_TBL_SIZE  128
45 #define IXGBE_82598_VFT_TBL_SIZE 128
46 #define IXGBE_82598_RX_PB_SIZE   512
47 
48 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
49 					     ixgbe_link_speed *speed,
50 					     bool *autoneg);
51 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
52 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
53 				      bool autoneg_wait_to_complete);
54 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
55 				      ixgbe_link_speed *speed, bool *link_up,
56 				      bool link_up_wait_to_complete);
57 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
58 				      ixgbe_link_speed speed,
59 				      bool autoneg_wait_to_complete);
60 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
61 					 ixgbe_link_speed speed,
62 					 bool autoneg_wait_to_complete);
63 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
64 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
65 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
66 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
67 				  u32 headroom, int strategy);
68 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
69 					u8 *sff8472_data);
70 /**
71  * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
72  * @hw: pointer to the HW structure
73  *
74  * The defaults for 82598 should be in the range of 50us to 50ms; however,
75  * the hardware default for these parts is 500us to 1ms, which is less than
76  * the 10ms recommended by the PCIe spec.  To address this we need to
77  * increase the value to either 10ms to 250ms for a capability version 1
78  * config, or 16ms to 55ms for version 2.
79  **/
80 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
81 {
82 	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
83 	u16 pcie_devctl2;
84 
85 	/* only take action if timeout value is defaulted to 0 */
86 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
87 		goto out;
88 
89 	/*
90 	 * if capabilities version is type 1 we can write the
91 	 * timeout of 10ms to 250ms through the GCR register
92 	 */
93 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
94 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
95 		goto out;
96 	}
97 
98 	/*
99 	 * for version 2 capabilities we need to write the config space
100 	 * directly in order to set the completion timeout value for
101 	 * 16ms to 55ms
102 	 */
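	/*
	 * Bits 3:0 of PCIe Device Control 2 hold the Completion Timeout Value;
	 * IXGBE_PCI_DEVICE_CONTROL2_16ms is assumed to encode the 16ms-55ms
	 * range used here.
	 */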
103 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
104 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
105 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
106 out:
107 	/* disable completion timeout resend */
108 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
109 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
110 }
111 
112 /**
113  * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
114  * @hw: pointer to hardware structure
115  *
116  * Initialize the function pointers and assign the MAC type for 82598.
117  * Does not touch the hardware.
118  **/
119 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
120 {
121 	struct ixgbe_mac_info *mac = &hw->mac;
122 	struct ixgbe_phy_info *phy = &hw->phy;
123 	s32 ret_val;
124 
125 	DEBUGFUNC("ixgbe_init_ops_82598");
126 
127 	ret_val = ixgbe_init_phy_ops_generic(hw);
128 	ret_val = ixgbe_init_ops_generic(hw);
129 
130 	/* PHY */
131 	phy->ops.init = ixgbe_init_phy_ops_82598;
132 
133 	/* MAC */
134 	mac->ops.start_hw = ixgbe_start_hw_82598;
135 	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
136 	mac->ops.reset_hw = ixgbe_reset_hw_82598;
137 	mac->ops.get_media_type = ixgbe_get_media_type_82598;
138 	mac->ops.get_supported_physical_layer =
139 				ixgbe_get_supported_physical_layer_82598;
140 	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
141 	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
142 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
143 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
144 
145 	/* RAR, Multicast, VLAN */
146 	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
147 	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
148 	mac->ops.set_vfta = ixgbe_set_vfta_82598;
149 	mac->ops.set_vlvf = NULL;
150 	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
151 
152 	/* Flow Control */
153 	mac->ops.fc_enable = ixgbe_fc_enable_82598;
154 
155 	mac->mcft_size		= IXGBE_82598_MC_TBL_SIZE;
156 	mac->vft_size		= IXGBE_82598_VFT_TBL_SIZE;
157 	mac->num_rar_entries	= IXGBE_82598_RAR_ENTRIES;
158 	mac->rx_pb_size		= IXGBE_82598_RX_PB_SIZE;
159 	mac->max_rx_queues	= IXGBE_82598_MAX_RX_QUEUES;
160 	mac->max_tx_queues	= IXGBE_82598_MAX_TX_QUEUES;
161 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
162 
163 	/* SFP+ Module */
164 	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
165 	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
166 
167 	/* Link */
168 	mac->ops.check_link = ixgbe_check_mac_link_82598;
169 	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
170 	mac->ops.flap_tx_laser = NULL;
171 	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
172 	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
173 
174 	/* Manageability interface */
175 	mac->ops.set_fw_drv_ver = NULL;
176 
177 	mac->ops.get_rtrup2tc = NULL;
178 
179 	return ret_val;
180 }
181 
182 /**
183  * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
184  * @hw: pointer to hardware structure
185  *
186  * Initialize any function pointers that were not able to be
187  * set during init_shared_code because the PHY/SFP type was
188  * not known.  Perform the SFP init if necessary.
189  *
190  **/
191 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
192 {
193 	struct ixgbe_mac_info *mac = &hw->mac;
194 	struct ixgbe_phy_info *phy = &hw->phy;
195 	s32 ret_val = IXGBE_SUCCESS;
196 	u16 list_offset, data_offset;
197 
198 	DEBUGFUNC("ixgbe_init_phy_ops_82598");
199 
200 	/* Identify the PHY */
201 	phy->ops.identify(hw);
202 
203 	/* Overwrite the link function pointers if copper PHY */
204 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
205 		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
206 		mac->ops.get_link_capabilities =
207 				ixgbe_get_copper_link_capabilities_generic;
208 	}
209 
210 	switch (hw->phy.type) {
211 	case ixgbe_phy_tn:
212 		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
213 		phy->ops.check_link = ixgbe_check_phy_link_tnx;
214 		phy->ops.get_firmware_version =
215 					ixgbe_get_phy_firmware_version_tnx;
216 		break;
217 	case ixgbe_phy_nl:
218 		phy->ops.reset = ixgbe_reset_phy_nl;
219 
220 		/* Call SFP+ identify routine to get the SFP+ module type */
221 		ret_val = phy->ops.identify_sfp(hw);
222 		if (ret_val != IXGBE_SUCCESS)
223 			goto out;
224 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
225 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
226 			goto out;
227 		}
228 
229 		/* Check to see if SFP+ module is supported */
230 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
231 							      &list_offset,
232 							      &data_offset);
233 		if (ret_val != IXGBE_SUCCESS) {
234 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
235 			goto out;
236 		}
237 		break;
238 	default:
239 		break;
240 	}
241 
242 out:
243 	return ret_val;
244 }
245 
246 /**
247  * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
248  * @hw: pointer to hardware structure
249  *
250  * Starts the hardware using the generic start_hw function.
251  * Disables relaxed ordering, then sets the PCIe completion timeout.
252  *
253  **/
254 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
255 {
256 	u32 regval;
257 	u32 i;
258 	s32 ret_val = IXGBE_SUCCESS;
259 
260 	DEBUGFUNC("ixgbe_start_hw_82598");
261 
262 	ret_val = ixgbe_start_hw_generic(hw);
263 	if (ret_val)
264 		return ret_val;
265 
266 	/* Disable relaxed ordering */
267 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
268 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
269 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
270 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
271 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
272 	}
273 
274 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
275 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
276 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
277 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
278 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
279 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
280 	}
281 
282 	/* set the completion timeout for interface */
283 	ixgbe_set_pcie_completion_timeout(hw);
284 
285 	return ret_val;
286 }
287 
288 /**
289  * ixgbe_get_link_capabilities_82598 - Determines link capabilities
290  * @hw: pointer to hardware structure
291  * @speed: pointer to link speed
292  * @autoneg: boolean auto-negotiation value
293  *
294  * Determines the link capabilities by reading the AUTOC register.
295  **/
296 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
297 					     ixgbe_link_speed *speed,
298 					     bool *autoneg)
299 {
300 	s32 status = IXGBE_SUCCESS;
301 	u32 autoc = 0;
302 
303 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
304 
305 	/*
306 	 * Determine link capabilities based on the stored value of AUTOC,
307 	 * which represents EEPROM defaults.  If AUTOC value has not been
308 	 * stored, use the current register value.
309 	 */
310 	if (hw->mac.orig_link_settings_stored)
311 		autoc = hw->mac.orig_autoc;
312 	else
313 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
314 
315 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
316 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
317 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
318 		*autoneg = false;
319 		break;
320 
321 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
322 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
323 		*autoneg = false;
324 		break;
325 
326 	case IXGBE_AUTOC_LMS_1G_AN:
327 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
328 		*autoneg = true;
329 		break;
330 
331 	case IXGBE_AUTOC_LMS_KX4_AN:
332 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
333 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
334 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
335 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
336 		if (autoc & IXGBE_AUTOC_KX_SUPP)
337 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
338 		*autoneg = true;
339 		break;
340 
341 	default:
342 		status = IXGBE_ERR_LINK_SETUP;
343 		break;
344 	}
345 
346 	return status;
347 }
348 
349 /**
350  * ixgbe_get_media_type_82598 - Determines media type
351  * @hw: pointer to hardware structure
352  *
353  * Returns the media type (fiber, copper, backplane)
354  **/
355 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
356 {
357 	enum ixgbe_media_type media_type;
358 
359 	DEBUGFUNC("ixgbe_get_media_type_82598");
360 
361 	/* Detect if there is a copper PHY attached. */
362 	switch (hw->phy.type) {
363 	case ixgbe_phy_cu_unknown:
364 	case ixgbe_phy_tn:
365 		media_type = ixgbe_media_type_copper;
366 		goto out;
367 	default:
368 		break;
369 	}
370 
371 	/* Media type for I82598 is based on device ID */
372 	switch (hw->device_id) {
373 	case IXGBE_DEV_ID_82598:
374 	case IXGBE_DEV_ID_82598_BX:
375 		/* Default device ID is mezzanine card KX/KX4 */
376 		media_type = ixgbe_media_type_backplane;
377 		break;
378 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
379 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
380 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
381 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
382 	case IXGBE_DEV_ID_82598EB_XF_LR:
383 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
384 		media_type = ixgbe_media_type_fiber;
385 		break;
386 	case IXGBE_DEV_ID_82598EB_CX4:
387 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
388 		media_type = ixgbe_media_type_cx4;
389 		break;
390 	case IXGBE_DEV_ID_82598AT:
391 	case IXGBE_DEV_ID_82598AT2:
392 		media_type = ixgbe_media_type_copper;
393 		break;
394 	default:
395 		media_type = ixgbe_media_type_unknown;
396 		break;
397 	}
398 out:
399 	return media_type;
400 }
401 
402 /**
403  * ixgbe_fc_enable_82598 - Enable flow control
404  * @hw: pointer to hardware structure
405  *
406  * Enable flow control according to the current settings.
407  **/
408 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
409 {
410 	s32 ret_val = IXGBE_SUCCESS;
411 	u32 fctrl_reg;
412 	u32 rmcs_reg;
413 	u32 reg;
414 	u32 fcrtl, fcrth;
415 	u32 link_speed = 0;
416 	int i;
417 	bool link_up;
418 
419 	DEBUGFUNC("ixgbe_fc_enable_82598");
420 
421 	/* Validate the water mark configuration */
422 	if (!hw->fc.pause_time) {
423 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
424 		goto out;
425 	}
426 
427 	/* Low water mark of zero causes XOFF floods */
428 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
429 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
430 		    hw->fc.high_water[i]) {
431 			if (!hw->fc.low_water[i] ||
432 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
433 				DEBUGOUT("Invalid water mark configuration\n");
434 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
435 				goto out;
436 			}
437 		}
438 	}
439 
440 	/*
441 	 * On 82598, having Rx flow control on causes resets while doing 1G,
442 	 * so if it's on, turn it off once we know the link_speed.  For
443 	 * more details see the 82598 Specification Update.
444 	 */
445 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
446 	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
447 		switch (hw->fc.requested_mode) {
448 		case ixgbe_fc_full:
449 			hw->fc.requested_mode = ixgbe_fc_tx_pause;
450 			break;
451 		case ixgbe_fc_rx_pause:
452 			hw->fc.requested_mode = ixgbe_fc_none;
453 			break;
454 		default:
455 			/* no change */
456 			break;
457 		}
458 	}
459 
460 	/* Negotiate the fc mode to use */
461 	ixgbe_fc_autoneg(hw);
462 
463 	/* Disable any previous flow control settings */
464 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
465 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
466 
467 	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
468 	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
469 
470 	/*
471 	 * The possible values of fc.current_mode are:
472 	 * 0: Flow control is completely disabled
473 	 * 1: Rx flow control is enabled (we can receive pause frames,
474 	 *    but not send pause frames).
475 	 * 2: Tx flow control is enabled (we can send pause frames but
476 	 *     we do not support receiving pause frames).
477 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
478 	 * other: Invalid.
479 	 */
480 	switch (hw->fc.current_mode) {
481 	case ixgbe_fc_none:
482 		/*
483 		 * Flow control is disabled by software override or autoneg.
484 		 * The code below will actually disable it in the HW.
485 		 */
486 		break;
487 	case ixgbe_fc_rx_pause:
488 		/*
489 		 * Rx Flow control is enabled and Tx Flow control is
490 		 * disabled by software override. Since there really
491 		 * isn't a way to advertise that we are capable of RX
492 		 * Pause ONLY, we will advertise that we support both
493 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
494 		 * disable the adapter's ability to send PAUSE frames.
495 		 */
496 		fctrl_reg |= IXGBE_FCTRL_RFCE;
497 		break;
498 	case ixgbe_fc_tx_pause:
499 		/*
500 		 * Tx Flow control is enabled, and Rx Flow control is
501 		 * disabled by software override.
502 		 */
503 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
504 		break;
505 	case ixgbe_fc_full:
506 		/* Flow control (both Rx and Tx) is enabled by SW override. */
507 		fctrl_reg |= IXGBE_FCTRL_RFCE;
508 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
509 		break;
510 	default:
511 		DEBUGOUT("Flow control param set incorrectly\n");
512 		ret_val = IXGBE_ERR_CONFIG;
513 		goto out;
514 		break;
515 	}
516 
517 	/* Set 802.3x based flow control settings. */
518 	fctrl_reg |= IXGBE_FCTRL_DPF;
519 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
520 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
521 
522 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
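	/*
	 * The low/high water marks are assumed to be kept in kilobytes by the
	 * caller; the << 10 below scales them to the byte-granular FCRTL/FCRTH
	 * fields.
	 */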
523 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
524 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
525 		    hw->fc.high_water[i]) {
526 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
527 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
528 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
529 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
530 		} else {
531 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
532 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
533 		}
534 
535 	}
536 
537 	/* Configure pause time (2 TCs per register) */
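	/* Multiplying by 0x00010001 replicates the 16-bit pause time into both
	 * halves of each FCTTV register, one per traffic class. */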
538 	reg = hw->fc.pause_time * 0x00010001;
539 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
540 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
541 
542 	/* Configure flow control refresh threshold value */
543 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
544 
545 out:
546 	return ret_val;
547 }
548 
549 /**
550  * ixgbe_start_mac_link_82598 - Configures MAC link settings
551  * @hw: pointer to hardware structure
552  * @autoneg_wait_to_complete: true when waiting for completion is needed
553  *
554  * Configures link settings based on values in the ixgbe_hw struct.
555  * Restarts the link.  Performs autonegotiation if needed.
556  **/
557 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
558 				      bool autoneg_wait_to_complete)
559 {
560 	u32 autoc_reg;
561 	u32 links_reg;
562 	u32 i;
563 	s32 status = IXGBE_SUCCESS;
564 
565 	DEBUGFUNC("ixgbe_start_mac_link_82598");
566 
567 	/* Restart link */
568 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
569 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
570 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
571 
572 	/* Only poll for autoneg to complete if specified to do so */
573 	if (autoneg_wait_to_complete) {
574 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
575 		     IXGBE_AUTOC_LMS_KX4_AN ||
576 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
577 		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
578 			links_reg = 0; /* Just in case Autoneg time = 0 */
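			/*
			 * Each iteration below sleeps 100ms, so the total wait
			 * is IXGBE_AUTO_NEG_TIME * 100ms (4.5s, assuming the
			 * usual definition of 45).
			 */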
579 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
580 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
581 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
582 					break;
583 				msec_delay(100);
584 			}
585 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
586 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
587 				DEBUGOUT("Autonegotiation did not complete.\n");
588 			}
589 		}
590 	}
591 
592 	/* Add delay to filter out noise during initial link setup */
593 	msec_delay(50);
594 
595 	return status;
596 }
597 
598 /**
599  * ixgbe_validate_link_ready - Function looks for phy link
600  * @hw: pointer to hardware structure
601  *
602  * Function indicates success when the PHY link is available.  If the PHY is not
603  * ready within 5 seconds of the MAC indicating link, the function returns an error.
604  **/
605 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
606 {
607 	u32 timeout;
608 	u16 an_reg;
609 
610 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
611 		return IXGBE_SUCCESS;
612 
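	/*
	 * Poll in 100ms steps; the total wait is
	 * IXGBE_VALIDATE_LINK_READY_TIMEOUT * 100ms (the 5 seconds mentioned
	 * above, assuming the usual timeout count of 50).
	 */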
613 	for (timeout = 0;
614 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
615 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
616 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
617 
618 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
619 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
620 			break;
621 
622 		msec_delay(100);
623 	}
624 
625 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
626 		DEBUGOUT("Link was indicated but link is down\n");
627 		return IXGBE_ERR_LINK_SETUP;
628 	}
629 
630 	return IXGBE_SUCCESS;
631 }
632 
633 /**
634  * ixgbe_check_mac_link_82598 - Get link/speed status
635  * @hw: pointer to hardware structure
636  * @speed: pointer to link speed
637  * @link_up: true if link is up, false otherwise
638  * @link_up_wait_to_complete: bool used to wait for link up or not
639  *
640  * Reads the links register to determine if link is up and the current speed
641  **/
642 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
643 				      ixgbe_link_speed *speed, bool *link_up,
644 				      bool link_up_wait_to_complete)
645 {
646 	u32 links_reg;
647 	u32 i;
648 	u16 link_reg, adapt_comp_reg;
649 
650 	DEBUGFUNC("ixgbe_check_mac_link_82598");
651 
652 	/*
653 	 * SERDES PHY requires us to read link status from undocumented
654 	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
655 	 * indicates link down.  0xC00C is read to check that the XAUI lanes
656 	 * are active.  Bit 0 clear indicates active; set indicates inactive.
657 	 */
658 	if (hw->phy.type == ixgbe_phy_nl) {
659 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
660 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
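		/*
		 * The back-to-back reads of 0xC79F are deliberate; the link
		 * status bit is presumed to be latched, so the first read
		 * clears any stale value (assumption, the register is
		 * undocumented).
		 */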
661 		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
662 				     &adapt_comp_reg);
663 		if (link_up_wait_to_complete) {
664 			for (i = 0; i < hw->mac.max_link_up_time; i++) {
665 				if ((link_reg & 1) &&
666 				    ((adapt_comp_reg & 1) == 0)) {
667 					*link_up = true;
668 					break;
669 				} else {
670 					*link_up = false;
671 				}
672 				msec_delay(100);
673 				hw->phy.ops.read_reg(hw, 0xC79F,
674 						     IXGBE_TWINAX_DEV,
675 						     &link_reg);
676 				hw->phy.ops.read_reg(hw, 0xC00C,
677 						     IXGBE_TWINAX_DEV,
678 						     &adapt_comp_reg);
679 			}
680 		} else {
681 			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
682 				*link_up = true;
683 			else
684 				*link_up = false;
685 		}
686 
687 		if (*link_up == false)
688 			goto out;
689 	}
690 
691 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
692 	if (link_up_wait_to_complete) {
693 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
694 			if (links_reg & IXGBE_LINKS_UP) {
695 				*link_up = true;
696 				break;
697 			} else {
698 				*link_up = false;
699 			}
700 			msec_delay(100);
701 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
702 		}
703 	} else {
704 		if (links_reg & IXGBE_LINKS_UP)
705 			*link_up = true;
706 		else
707 			*link_up = false;
708 	}
709 
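	/* LINKS reports a single speed bit on 82598: set means 10G, clear 1G. */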
710 	if (links_reg & IXGBE_LINKS_SPEED)
711 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
712 	else
713 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
714 
715 	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
716 	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
717 		*link_up = false;
718 
719 out:
720 	return IXGBE_SUCCESS;
721 }
722 
723 /**
724  * ixgbe_setup_mac_link_82598 - Set MAC link speed
725  * @hw: pointer to hardware structure
726  * @speed: new link speed
727  * @autoneg_wait_to_complete: true when waiting for completion is needed
728  *
729  * Set the link speed in the AUTOC register and restarts link.
730  **/
731 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
732 				      ixgbe_link_speed speed,
733 				      bool autoneg_wait_to_complete)
734 {
735 	bool autoneg = false;
736 	s32 status = IXGBE_SUCCESS;
737 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
738 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
739 	u32 autoc = curr_autoc;
740 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
741 
742 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
743 
744 	/* Check to see if speed passed in is supported. */
745 	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
746 	speed &= link_capabilities;
747 
748 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
749 		status = IXGBE_ERR_LINK_SETUP;
750 
751 	/* Set KX4/KX support according to speed requested */
752 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
753 		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
754 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
755 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
756 			autoc |= IXGBE_AUTOC_KX4_SUPP;
757 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
758 			autoc |= IXGBE_AUTOC_KX_SUPP;
759 		if (autoc != curr_autoc)
760 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
761 	}
762 
763 	if (status == IXGBE_SUCCESS) {
764 		/*
765 		 * Setup and restart the link based on the new values in
766 		 * ixgbe_hw.  This will write the AUTOC register based on the
767 		 * new stored values.
768 		 */
769 		status = ixgbe_start_mac_link_82598(hw,
770 						    autoneg_wait_to_complete);
771 	}
772 
773 	return status;
774 }
775 
776 
777 /**
778  * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
779  * @hw: pointer to hardware structure
780  * @speed: new link speed
781  * @autoneg_wait_to_complete: true if waiting is needed to complete
782  *
783  * Sets the link speed in the AUTOC register in the MAC and restarts link.
784  **/
785 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
786 					 ixgbe_link_speed speed,
787 					 bool autoneg_wait_to_complete)
788 {
789 	s32 status;
790 
791 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
792 
793 	/* Setup the PHY according to input speed */
794 	status = hw->phy.ops.setup_link_speed(hw, speed,
795 					      autoneg_wait_to_complete);
796 	/* Set up MAC */
797 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
798 
799 	return status;
800 }
801 
802 /**
803  * ixgbe_reset_hw_82598 - Performs hardware reset
804  * @hw: pointer to hardware structure
805  *
806  * Resets the hardware by resetting the transmit and receive units, masks and
807  * clears all interrupts, performing a PHY reset, and performing a link (MAC)
808  * reset.
809  **/
810 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
811 {
812 	s32 status = IXGBE_SUCCESS;
813 	s32 phy_status = IXGBE_SUCCESS;
814 	u32 ctrl;
815 	u32 gheccr;
816 	u32 i;
817 	u32 autoc;
818 	u8  analog_val;
819 
820 	DEBUGFUNC("ixgbe_reset_hw_82598");
821 
822 	/* Call adapter stop to disable tx/rx and clear interrupts */
823 	status = hw->mac.ops.stop_adapter(hw);
824 	if (status != IXGBE_SUCCESS)
825 		goto reset_hw_out;
826 
827 	/*
828 	 * Power up the Atlas Tx lanes if they are currently powered down.
829 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
830 	 * they are not automatically restored on reset.
831 	 */
832 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
833 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
834 		/* Enable Tx Atlas so packets can be transmitted again */
835 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
836 					     &analog_val);
837 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
838 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
839 					      analog_val);
840 
841 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
842 					     &analog_val);
843 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
844 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
845 					      analog_val);
846 
847 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
848 					     &analog_val);
849 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
850 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
851 					      analog_val);
852 
853 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
854 					     &analog_val);
855 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
856 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
857 					      analog_val);
858 	}
859 
860 	/* Reset PHY */
861 	if (hw->phy.reset_disable == false) {
862 		/* PHY ops must be identified and initialized prior to reset */
863 
864 		/* Init PHY and function pointers, perform SFP setup */
865 		phy_status = hw->phy.ops.init(hw);
866 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
867 			goto reset_hw_out;
868 		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
869 			goto mac_reset_top;
870 
871 		hw->phy.ops.reset(hw);
872 	}
873 
874 mac_reset_top:
875 	/*
876 	 * Issue global reset to the MAC.  This needs to be a SW reset.
877 	 * If link reset is used, it might reset the MAC when mng is using it
878 	 */
879 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
880 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
881 	IXGBE_WRITE_FLUSH(hw);
882 
883 	/* Poll for reset bit to self-clear indicating reset is complete */
884 	for (i = 0; i < 10; i++) {
885 		usec_delay(1);
886 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
887 		if (!(ctrl & IXGBE_CTRL_RST))
888 			break;
889 	}
890 	if (ctrl & IXGBE_CTRL_RST) {
891 		status = IXGBE_ERR_RESET_FAILED;
892 		DEBUGOUT("Reset polling failed to complete.\n");
893 	}
894 
895 	msec_delay(50);
896 
897 	/*
898 	 * Double resets are required for recovery from certain error
899 	 * conditions.  Between resets, it is necessary to stall to allow time
900 	 * for any pending HW events to complete.
901 	 */
902 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
903 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
904 		goto mac_reset_top;
905 	}
906 
907 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
908 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
909 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
910 
911 	/*
912 	 * Store the original AUTOC value if it has not been
913 	 * stored off yet.  Otherwise restore the stored original
914 	 * AUTOC value since the reset operation sets back to deaults.
915 	 */
916 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
917 	if (hw->mac.orig_link_settings_stored == false) {
918 		hw->mac.orig_autoc = autoc;
919 		hw->mac.orig_link_settings_stored = true;
920 	} else if (autoc != hw->mac.orig_autoc) {
921 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
922 	}
923 
924 	/* Store the permanent mac address */
925 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
926 
927 	/*
928 	 * Store MAC address from RAR0, clear receive address registers, and
929 	 * clear the multicast table
930 	 */
931 	hw->mac.ops.init_rx_addrs(hw);
932 
933 reset_hw_out:
934 	if (phy_status != IXGBE_SUCCESS)
935 		status = phy_status;
936 
937 	return status;
938 }
939 
940 /**
941  * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
942  * @hw: pointer to hardware struct
943  * @rar: receive address register index to associate with a VMDq index
944  * @vmdq: VMDq set index
945  **/
946 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
947 {
948 	u32 rar_high;
949 	u32 rar_entries = hw->mac.num_rar_entries;
950 
951 	DEBUGFUNC("ixgbe_set_vmdq_82598");
952 
953 	/* Make sure we are using a valid rar index range */
954 	if (rar >= rar_entries) {
955 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
956 		return IXGBE_ERR_INVALID_ARGUMENT;
957 	}
958 
959 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
960 	rar_high &= ~IXGBE_RAH_VIND_MASK;
961 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
962 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
963 	return IXGBE_SUCCESS;
964 }
965 
966 /**
967  * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
968  * @hw: pointer to hardware struct
969  * @rar: receive address register index to associate with a VMDq index
970  * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
971  **/
972 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
973 {
974 	u32 rar_high;
975 	u32 rar_entries = hw->mac.num_rar_entries;
976 
977 	UNREFERENCED_1PARAMETER(vmdq);
978 
979 	/* Make sure we are using a valid rar index range */
980 	if (rar >= rar_entries) {
981 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
982 		return IXGBE_ERR_INVALID_ARGUMENT;
983 	}
984 
985 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
986 	if (rar_high & IXGBE_RAH_VIND_MASK) {
987 		rar_high &= ~IXGBE_RAH_VIND_MASK;
988 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
989 	}
990 
991 	return IXGBE_SUCCESS;
992 }
993 
994 /**
995  * ixgbe_set_vfta_82598 - Set VLAN filter table
996  * @hw: pointer to hardware structure
997  * @vlan: VLAN id to write to VLAN filter
998  * @vind: VMDq output index that maps queue to VLAN id in VFTA
999  * @vlan_on: boolean flag to turn on/off VLAN in VFTA
1000  * @vlvf_bypass: boolean flag - unused
1001  *
1002  * Turn on/off specified VLAN in the VLAN filter table.
1003  **/
1004 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1005 			 bool vlan_on, bool vlvf_bypass)
1006 {
1007 	u32 regindex;
1008 	u32 bitindex;
1009 	u32 bits;
1010 	u32 vftabyte;
1011 
1012 	UNREFERENCED_1PARAMETER(vlvf_bypass);
1013 
1014 	DEBUGFUNC("ixgbe_set_vfta_82598");
1015 
1016 	if (vlan > 4095)
1017 		return IXGBE_ERR_PARAM;
1018 
1019 	/* Determine 32-bit word position in array */
1020 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
1021 
1022 	/* Determine the location of the (VMD) queue index */
1023 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1024 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
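	/*
	 * Illustrative example: for vlan = 0x123 (291), regindex = 9,
	 * vftabyte = 0 and bitindex = 12, so the VMDq nibble lives in bits
	 * 15:12 of IXGBE_VFTAVIND(0, 9); the VLAN enable bit set below is
	 * bit 3 (291 & 0x1F) of IXGBE_VFTA(9).
	 */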
1025 
1026 	/* Set the nibble for VMD queue index */
1027 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1028 	bits &= (~(0x0F << bitindex));
1029 	bits |= (vind << bitindex);
1030 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1031 
1032 	/* Determine the location of the bit for this VLAN id */
1033 	bitindex = vlan & 0x1F;   /* lower five bits */
1034 
1035 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1036 	if (vlan_on)
1037 		/* Turn on this VLAN id */
1038 		bits |= (1 << bitindex);
1039 	else
1040 		/* Turn off this VLAN id */
1041 		bits &= ~(1 << bitindex);
1042 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1043 
1044 	return IXGBE_SUCCESS;
1045 }
1046 
1047 /**
1048  * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1049  * @hw: pointer to hardware structure
1050  *
1051  * Clears the VLAN filter table, and the VMDq index associated with the filter
1052  **/
1053 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1054 {
1055 	u32 offset;
1056 	u32 vlanbyte;
1057 
1058 	DEBUGFUNC("ixgbe_clear_vfta_82598");
1059 
1060 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1061 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1062 
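	/* Also clear all four VFTAVIND byte planes (the vftabyte index used by
	 * ixgbe_set_vfta_82598 ranges over 0..3). */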
1063 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1064 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1065 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1066 					0);
1067 
1068 	return IXGBE_SUCCESS;
1069 }
1070 
1071 /**
1072  * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1073  * @hw: pointer to hardware structure
1074  * @reg: analog register to read
1075  * @val: read value
1076  *
1077  * Performs read operation to Atlas analog register specified.
1078  **/
1079 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1080 {
1081 	u32  atlas_ctl;
1082 
1083 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1084 
1085 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1086 			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1087 	IXGBE_WRITE_FLUSH(hw);
1088 	usec_delay(10);
1089 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1090 	*val = (u8)atlas_ctl;
1091 
1092 	return IXGBE_SUCCESS;
1093 }
1094 
1095 /**
1096  * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1097  * @hw: pointer to hardware structure
1098  * @reg: atlas register to write
1099  * @val: value to write
1100  *
1101  * Performs write operation to Atlas analog register specified.
1102  **/
1103 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1104 {
1105 	u32  atlas_ctl;
1106 
1107 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1108 
1109 	atlas_ctl = (reg << 8) | val;
1110 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1111 	IXGBE_WRITE_FLUSH(hw);
1112 	usec_delay(10);
1113 
1114 	return IXGBE_SUCCESS;
1115 }
1116 
1117 /**
1118  * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1119  * @hw: pointer to hardware structure
1120  * @dev_addr: address to read from
1121  * @byte_offset: byte offset to read from dev_addr
1122  * @eeprom_data: value read
1123  *
1124  * Performs an 8-bit read operation to the SFP module's EEPROM over the I2C interface.
1125  **/
1126 static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1127 				    u8 byte_offset, u8 *eeprom_data)
1128 {
1129 	s32 status = IXGBE_SUCCESS;
1130 	u16 sfp_addr = 0;
1131 	u16 sfp_data = 0;
1132 	u16 sfp_stat = 0;
1133 	u16 gssr;
1134 	u32 i;
1135 
1136 	DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1137 
1138 	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1139 		gssr = IXGBE_GSSR_PHY1_SM;
1140 	else
1141 		gssr = IXGBE_GSSR_PHY0_SM;
1142 
1143 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
1144 		return IXGBE_ERR_SWFW_SYNC;
1145 
1146 	if (hw->phy.type == ixgbe_phy_nl) {
1147 		/*
1148 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1149 		 * 0xC30D. These registers are used to talk to the SFP+
1150 		 * module's EEPROM through the SDA/SCL (I2C) interface.
1151 		 */
1152 		sfp_addr = (dev_addr << 8) + byte_offset;
1153 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
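		/*
		 * Example (illustrative): dev_addr 0xA0, byte_offset 0x03
		 * yields sfp_addr 0xA003 before the read flag is OR'd in.
		 */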
1154 		hw->phy.ops.write_reg_mdi(hw,
1155 					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1156 					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1157 					  sfp_addr);
1158 
1159 		/* Poll status */
1160 		for (i = 0; i < 100; i++) {
1161 			hw->phy.ops.read_reg_mdi(hw,
1162 						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1163 						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1164 						&sfp_stat);
1165 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1166 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1167 				break;
1168 			msec_delay(10);
1169 		}
1170 
1171 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1172 			DEBUGOUT("EEPROM read did not pass.\n");
1173 			status = IXGBE_ERR_SFP_NOT_PRESENT;
1174 			goto out;
1175 		}
1176 
1177 		/* Read data */
1178 		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1179 					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1180 
1181 		*eeprom_data = (u8)(sfp_data >> 8);
1182 	} else {
1183 		status = IXGBE_ERR_PHY;
1184 	}
1185 
1186 out:
1187 	hw->mac.ops.release_swfw_sync(hw, gssr);
1188 	return status;
1189 }
1190 
1191 /**
1192  * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1193  * @hw: pointer to hardware structure
1194  * @byte_offset: EEPROM byte offset to read
1195  * @eeprom_data: value read
1196  *
1197  * Performs an 8-bit read operation to the SFP module's EEPROM over the I2C interface.
1198  **/
1199 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1200 				u8 *eeprom_data)
1201 {
1202 	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1203 					byte_offset, eeprom_data);
1204 }
1205 
1206 /**
1207  * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1208  * @hw: pointer to hardware structure
1209  * @byte_offset: byte offset at address 0xA2
1210  * @sff8472_data: value read
1211  *
1212  * Performs an 8-bit read operation to the SFP module's SFF-8472 data over I2C
1213  **/
1214 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1215 					u8 *sff8472_data)
1216 {
1217 	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1218 					byte_offset, sff8472_data);
1219 }
1220 
1221 /**
1222  * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1223  * @hw: pointer to hardware structure
1224  *
1225  * Determines physical layer capabilities of the current configuration.
1226  **/
1227 u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1228 {
1229 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1230 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1231 	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1232 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1233 	u16 ext_ability = 0;
1234 
1235 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1236 
1237 	hw->phy.ops.identify(hw);
1238 
1239 	/* Copper PHY must be checked before AUTOC LMS to determine correct
1240 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1241 	switch (hw->phy.type) {
1242 	case ixgbe_phy_tn:
1243 	case ixgbe_phy_cu_unknown:
1244 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1245 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1246 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1247 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1248 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1249 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1250 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1251 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1252 		goto out;
1253 	default:
1254 		break;
1255 	}
1256 
1257 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1258 	case IXGBE_AUTOC_LMS_1G_AN:
1259 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1260 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1261 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1262 		else
1263 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1264 		break;
1265 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1266 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1267 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1268 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1269 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1270 		else /* XAUI */
1271 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1272 		break;
1273 	case IXGBE_AUTOC_LMS_KX4_AN:
1274 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1275 		if (autoc & IXGBE_AUTOC_KX_SUPP)
1276 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1277 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
1278 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1279 		break;
1280 	default:
1281 		break;
1282 	}
1283 
1284 	if (hw->phy.type == ixgbe_phy_nl) {
1285 		hw->phy.ops.identify_sfp(hw);
1286 
1287 		switch (hw->phy.sfp_type) {
1288 		case ixgbe_sfp_type_da_cu:
1289 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1290 			break;
1291 		case ixgbe_sfp_type_sr:
1292 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1293 			break;
1294 		case ixgbe_sfp_type_lr:
1295 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1296 			break;
1297 		default:
1298 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1299 			break;
1300 		}
1301 	}
1302 
1303 	switch (hw->device_id) {
1304 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1305 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1306 		break;
1307 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1308 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1309 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1310 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1311 		break;
1312 	case IXGBE_DEV_ID_82598EB_XF_LR:
1313 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1314 		break;
1315 	default:
1316 		break;
1317 	}
1318 
1319 out:
1320 	return physical_layer;
1321 }
1322 
1323 /**
1324  * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1325  * port devices.
1326  * @hw: pointer to the HW structure
1327  *
1328  * Calls common function and corrects issue with some single port devices
1329  * that enable LAN1 but not LAN0.
1330  **/
1331 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1332 {
1333 	struct ixgbe_bus_info *bus = &hw->bus;
1334 	u16 pci_gen = 0;
1335 	u16 pci_ctrl2 = 0;
1336 
1337 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1338 
1339 	ixgbe_set_lan_id_multi_port_pcie(hw);
1340 
1341 	/* check if LAN0 is disabled */
1342 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1343 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1344 
1345 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1346 
1347 		/* if LAN0 is completely disabled force function to 0 */
1348 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1349 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1350 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1351 
1352 			bus->func = 0;
1353 		}
1354 	}
1355 }
1356 
1357 /**
1358  * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1359  * @hw: pointer to hardware structure
1360  *
1361  **/
1362 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1363 {
1364 	u32 regval;
1365 	u32 i;
1366 
1367 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1368 
1369 	/* Enable relaxed ordering */
1370 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1371 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1372 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1373 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1374 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1375 	}
1376 
1377 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1378 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1379 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1380 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1381 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1382 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1383 	}
1384 
1385 }
1386 
1387 /**
1388  * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1389  * @hw: pointer to hardware structure
1390  * @num_pb: number of packet buffers to allocate
1391  * @headroom: reserve n KB of headroom
1392  * @strategy: packet buffer allocation strategy
1393  **/
1394 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1395 				  u32 headroom, int strategy)
1396 {
1397 	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1398 	u8 i = 0;
1399 	UNREFERENCED_1PARAMETER(headroom);
1400 
1401 	if (!num_pb)
1402 		return;
1403 
1404 	/* Setup Rx packet buffer sizes */
1405 	switch (strategy) {
1406 	case PBA_STRATEGY_WEIGHTED:
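		/*
		 * Weighted split: 4 x 80KB plus 4 x 48KB fills the 512KB
		 * IXGBE_82598_RX_PB_SIZE budget (assuming
		 * IXGBE_MAX_PACKET_BUFFERS is 8 here); the equal strategy
		 * uses 8 x 64KB instead.
		 */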
1407 		/* Setup the first four at 80KB */
1408 		rxpktsize = IXGBE_RXPBSIZE_80KB;
1409 		for (; i < 4; i++)
1410 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1411 		/* Setup the last four at 48KB...don't re-init i */
1412 		rxpktsize = IXGBE_RXPBSIZE_48KB;
1413 		/* Fall Through */
1414 	case PBA_STRATEGY_EQUAL:
1415 	default:
1416 		/* Divide the remaining Rx packet buffer evenly among the TCs */
1417 		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1418 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1419 		break;
1420 	}
1421 
1422 	/* Setup Tx packet buffer sizes */
1423 	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1424 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1425 }
1426 
1427 /**
1428  * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
1429  * @hw: pointer to hardware structure
1430  * @regval: register value to write to RXCTRL
1431  *
1432  * Enables the Rx DMA unit
1433  **/
1434 s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
1435 {
1436 	DEBUGFUNC("ixgbe_enable_rx_dma_82598");
1437 
1438 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
1439 
1440 	return IXGBE_SUCCESS;
1441 }
1442